2024-12-03 04:56:57,239 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@4d14b6c2 2024-12-03 04:56:57,259 main DEBUG Took 0.015849 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-03 04:56:57,260 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-03 04:56:57,260 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-03 04:56:57,261 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-03 04:56:57,263 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 04:56:57,273 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-03 04:56:57,289 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 04:56:57,291 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 04:56:57,292 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 04:56:57,292 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 04:56:57,293 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 04:56:57,293 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 04:56:57,294 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 04:56:57,294 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 04:56:57,295 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 04:56:57,295 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 04:56:57,297 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 04:56:57,297 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 04:56:57,298 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 04:56:57,298 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-03 04:56:57,299 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 04:56:57,299 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 04:56:57,300 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 04:56:57,300 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 04:56:57,300 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 04:56:57,301 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 04:56:57,306 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 04:56:57,306 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 04:56:57,307 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 04:56:57,307 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 04:56:57,307 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 04:56:57,308 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-03 04:56:57,310 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 04:56:57,312 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-03 04:56:57,314 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-03 04:56:57,315 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-03 04:56:57,316 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-03 04:56:57,317 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-03 04:56:57,335 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-03 04:56:57,338 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-03 04:56:57,340 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-03 04:56:57,341 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-03 04:56:57,341 main DEBUG createAppenders(={Console})
2024-12-03 04:56:57,342 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@4d14b6c2 initialized
2024-12-03 04:56:57,342 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@4d14b6c2
2024-12-03 04:56:57,343 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@4d14b6c2 OK.
2024-12-03 04:56:57,344 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-03 04:56:57,344 main DEBUG OutputStream closed
2024-12-03 04:56:57,344 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-03 04:56:57,345 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-03 04:56:57,345 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@1de5f259 OK
2024-12-03 04:56:57,466 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-03 04:56:57,469 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-03 04:56:57,470 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-03 04:56:57,472 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-03 04:56:57,473 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-03 04:56:57,474 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-03 04:56:57,474 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-03 04:56:57,475 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-03 04:56:57,475 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-03 04:56:57,476 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-03 04:56:57,476 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-03 04:56:57,477 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-03 04:56:57,477 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-03 04:56:57,477 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-03 04:56:57,478 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-03 04:56:57,478 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-03 04:56:57,479 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-03 04:56:57,480 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-03 04:56:57,483 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-03 04:56:57,483 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@2766ca9d) with optional ClassLoader: null
2024-12-03 04:56:57,484 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-03 04:56:57,485 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@2766ca9d] started OK.
2024-12-03T04:56:57,818 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4
2024-12-03 04:56:57,822 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-03 04:56:57,822 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-03T04:56:57,833 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-12-03T04:56:57,889 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=545, ProcessCount=11, AvailableMemoryMB=4922 2024-12-03T04:56:57,892 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T04:56:57,913 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/cluster_2e3f5107-a8de-7682-7467-822af76e42a8, deleteOnExit=true 2024-12-03T04:56:57,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-03T04:56:57,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/test.cache.data in system properties and HBase conf 2024-12-03T04:56:57,916 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T04:56:57,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/hadoop.log.dir in system properties and HBase conf 2024-12-03T04:56:57,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T04:56:57,918 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T04:56:57,919 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-03T04:56:58,053 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-03T04:56:58,171 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-03T04:56:58,175 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T04:56:58,176 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T04:56:58,176 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T04:56:58,177 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T04:56:58,178 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T04:56:58,178 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T04:56:58,179 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T04:56:58,179 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T04:56:58,180 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T04:56:58,180 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/nfs.dump.dir in system properties and HBase conf 2024-12-03T04:56:58,181 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/java.io.tmpdir in system properties and HBase conf 2024-12-03T04:56:58,181 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T04:56:58,182 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T04:56:58,182 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T04:56:58,753 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T04:56:59,189 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-03T04:56:59,281 INFO [Time-limited test {}] log.Log(170): Logging initialized @3441ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-03T04:56:59,456 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T04:56:59,580 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T04:56:59,603 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T04:56:59,603 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T04:56:59,604 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T04:56:59,628 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T04:56:59,636 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75bdea07{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/hadoop.log.dir/,AVAILABLE} 2024-12-03T04:56:59,637 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@455f3457{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T04:56:59,867 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5f961078{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/java.io.tmpdir/jetty-localhost-45291-hadoop-hdfs-3_4_1-tests_jar-_-any-7285301834702867028/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T04:56:59,874 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@25dfddc5{HTTP/1.1, (http/1.1)}{localhost:45291} 2024-12-03T04:56:59,874 INFO [Time-limited test {}] server.Server(415): Started @4036ms 2024-12-03T04:56:59,910 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T04:57:00,324 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T04:57:00,333 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T04:57:00,334 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T04:57:00,335 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T04:57:00,335 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T04:57:00,337 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1557a369{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/hadoop.log.dir/,AVAILABLE} 2024-12-03T04:57:00,338 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21c149f1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T04:57:00,509 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1224a347{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/java.io.tmpdir/jetty-localhost-43243-hadoop-hdfs-3_4_1-tests_jar-_-any-5711694677275152132/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:57:00,509 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@157c43ed{HTTP/1.1, (http/1.1)}{localhost:43243} 2024-12-03T04:57:00,510 INFO [Time-limited test {}] server.Server(415): Started @4672ms 2024-12-03T04:57:00,571 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T04:57:00,700 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T04:57:00,706 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T04:57:00,710 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T04:57:00,710 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T04:57:00,710 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T04:57:00,711 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1fad11b5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/hadoop.log.dir/,AVAILABLE} 2024-12-03T04:57:00,712 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f51668d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T04:57:00,847 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4ead356f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/java.io.tmpdir/jetty-localhost-42709-hadoop-hdfs-3_4_1-tests_jar-_-any-15327300967356489081/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:57:00,848 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a4bcb2c{HTTP/1.1, (http/1.1)}{localhost:42709} 2024-12-03T04:57:00,848 INFO [Time-limited test {}] server.Server(415): Started @5011ms 2024-12-03T04:57:00,851 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-03T04:57:01,078 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/cluster_2e3f5107-a8de-7682-7467-822af76e42a8/data/data4/current/BP-1722606024-172.17.0.3-1733201818878/current, will proceed with Du for space computation calculation, 2024-12-03T04:57:01,078 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/cluster_2e3f5107-a8de-7682-7467-822af76e42a8/data/data1/current/BP-1722606024-172.17.0.3-1733201818878/current, will proceed with Du for space computation calculation, 2024-12-03T04:57:01,078 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/cluster_2e3f5107-a8de-7682-7467-822af76e42a8/data/data3/current/BP-1722606024-172.17.0.3-1733201818878/current, will proceed with Du for space computation calculation, 2024-12-03T04:57:01,086 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/cluster_2e3f5107-a8de-7682-7467-822af76e42a8/data/data2/current/BP-1722606024-172.17.0.3-1733201818878/current, will proceed with Du for space computation calculation, 2024-12-03T04:57:01,162 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T04:57:01,163 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T04:57:01,234 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4e78d47ac286dd23 with lease ID 0x94798bd0152f3cb0: Processing first storage report for DS-25bf40f5-d003-4c70-bb57-66af91e3226f from datanode DatanodeRegistration(127.0.0.1:41791, datanodeUuid=6e13a2ba-b388-42c9-a4b5-639a45d02156, infoPort=36489, infoSecurePort=0, ipcPort=45941, storageInfo=lv=-57;cid=testClusterID;nsid=721997856;c=1733201818878) 2024-12-03T04:57:01,235 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4e78d47ac286dd23 with lease ID 0x94798bd0152f3cb0: from storage DS-25bf40f5-d003-4c70-bb57-66af91e3226f node DatanodeRegistration(127.0.0.1:41791, datanodeUuid=6e13a2ba-b388-42c9-a4b5-639a45d02156, infoPort=36489, infoSecurePort=0, ipcPort=45941, storageInfo=lv=-57;cid=testClusterID;nsid=721997856;c=1733201818878), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-03T04:57:01,236 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3b1223a0cad8b731 with lease ID 0x94798bd0152f3caf: Processing first storage report for DS-34ae858c-ccec-4eb8-821a-d3b625271ead from datanode DatanodeRegistration(127.0.0.1:36129, datanodeUuid=669ad6ee-683d-4059-8bd9-b3b0b4724274, infoPort=46277, infoSecurePort=0, ipcPort=38369, storageInfo=lv=-57;cid=testClusterID;nsid=721997856;c=1733201818878) 2024-12-03T04:57:01,236 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3b1223a0cad8b731 with lease ID 0x94798bd0152f3caf: from storage DS-34ae858c-ccec-4eb8-821a-d3b625271ead node DatanodeRegistration(127.0.0.1:36129, datanodeUuid=669ad6ee-683d-4059-8bd9-b3b0b4724274, infoPort=46277, infoSecurePort=0, ipcPort=38369, storageInfo=lv=-57;cid=testClusterID;nsid=721997856;c=1733201818878), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T04:57:01,237 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4e78d47ac286dd23 with lease ID 0x94798bd0152f3cb0: Processing first storage report for DS-8d931013-1444-4828-94d5-9737740feaec from datanode DatanodeRegistration(127.0.0.1:41791, datanodeUuid=6e13a2ba-b388-42c9-a4b5-639a45d02156, infoPort=36489, infoSecurePort=0, ipcPort=45941, storageInfo=lv=-57;cid=testClusterID;nsid=721997856;c=1733201818878) 2024-12-03T04:57:01,237 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4e78d47ac286dd23 with lease ID 0x94798bd0152f3cb0: from storage DS-8d931013-1444-4828-94d5-9737740feaec node DatanodeRegistration(127.0.0.1:41791, datanodeUuid=6e13a2ba-b388-42c9-a4b5-639a45d02156, infoPort=36489, infoSecurePort=0, ipcPort=45941, storageInfo=lv=-57;cid=testClusterID;nsid=721997856;c=1733201818878), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-03T04:57:01,237 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3b1223a0cad8b731 with lease ID 0x94798bd0152f3caf: Processing first storage report for DS-30c00b3b-7117-492a-aa64-a329bc5b0d79 from datanode DatanodeRegistration(127.0.0.1:36129, datanodeUuid=669ad6ee-683d-4059-8bd9-b3b0b4724274, infoPort=46277, infoSecurePort=0, ipcPort=38369, storageInfo=lv=-57;cid=testClusterID;nsid=721997856;c=1733201818878) 2024-12-03T04:57:01,237 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x3b1223a0cad8b731 with lease ID 0x94798bd0152f3caf: from storage DS-30c00b3b-7117-492a-aa64-a329bc5b0d79 node DatanodeRegistration(127.0.0.1:36129, datanodeUuid=669ad6ee-683d-4059-8bd9-b3b0b4724274, infoPort=46277, infoSecurePort=0, ipcPort=38369, storageInfo=lv=-57;cid=testClusterID;nsid=721997856;c=1733201818878), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T04:57:01,329 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4 2024-12-03T04:57:01,431 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/cluster_2e3f5107-a8de-7682-7467-822af76e42a8/zookeeper_0, clientPort=63841, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/cluster_2e3f5107-a8de-7682-7467-822af76e42a8/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/cluster_2e3f5107-a8de-7682-7467-822af76e42a8/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T04:57:01,444 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63841 2024-12-03T04:57:01,459 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:57:01,463 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:57:01,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741825_1001 (size=7) 2024-12-03T04:57:01,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741825_1001 (size=7) 2024-12-03T04:57:02,155 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4 with version=8 2024-12-03T04:57:02,155 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/hbase-staging 2024-12-03T04:57:02,269 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-03T04:57:02,564 INFO [Time-limited test {}] client.ConnectionUtils(128): master/32beccd76b35:0 server-side Connection retries=45 2024-12-03T04:57:02,575 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T04:57:02,576 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T04:57:02,581 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T04:57:02,581 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T04:57:02,581 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T04:57:02,768 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-03T04:57:02,838 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-03T04:57:02,848 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-03T04:57:02,852 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T04:57:02,890 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 4133 (auto-detected) 2024-12-03T04:57:02,891 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-12-03T04:57:02,916 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34303 2024-12-03T04:57:02,957 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34303 connecting to ZooKeeper ensemble=127.0.0.1:63841 2024-12-03T04:57:03,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:343030x0, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T04:57:03,012 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34303-0x1015173f3870000 connected 2024-12-03T04:57:03,092 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:57:03,095 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:57:03,113 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T04:57:03,118 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4, hbase.cluster.distributed=false 2024-12-03T04:57:03,150 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T04:57:03,156 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34303 
2024-12-03T04:57:03,156 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34303 2024-12-03T04:57:03,157 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34303 2024-12-03T04:57:03,158 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34303 2024-12-03T04:57:03,158 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34303 2024-12-03T04:57:03,315 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/32beccd76b35:0 server-side Connection retries=45 2024-12-03T04:57:03,317 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T04:57:03,317 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T04:57:03,318 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T04:57:03,318 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T04:57:03,318 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T04:57:03,322 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T04:57:03,325 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T04:57:03,326 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:43303 2024-12-03T04:57:03,328 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43303 connecting to ZooKeeper ensemble=127.0.0.1:63841 2024-12-03T04:57:03,329 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:57:03,334 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:57:03,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:433030x0, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T04:57:03,343 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43303-0x1015173f3870001 connected 2024-12-03T04:57:03,343 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43303-0x1015173f3870001, quorum=127.0.0.1:63841, baseZNode=/hbase Set watcher on 
znode that does not yet exist, /hbase/running 2024-12-03T04:57:03,348 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T04:57:03,357 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T04:57:03,360 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43303-0x1015173f3870001, quorum=127.0.0.1:63841, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T04:57:03,365 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43303-0x1015173f3870001, quorum=127.0.0.1:63841, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T04:57:03,366 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43303 2024-12-03T04:57:03,366 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43303 2024-12-03T04:57:03,367 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43303 2024-12-03T04:57:03,373 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43303 2024-12-03T04:57:03,375 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43303 2024-12-03T04:57:03,390 DEBUG [M:0;32beccd76b35:34303 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;32beccd76b35:34303 2024-12-03T04:57:03,391 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/32beccd76b35,34303,1733201822340 2024-12-03T04:57:03,399 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T04:57:03,399 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43303-0x1015173f3870001, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T04:57:03,402 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/32beccd76b35,34303,1733201822340 2024-12-03T04:57:03,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:57:03,431 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T04:57:03,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43303-0x1015173f3870001, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T04:57:03,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:43303-0x1015173f3870001, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:57:03,434 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/32beccd76b35,34303,1733201822340 from backup master directory 2024-12-03T04:57:03,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43303-0x1015173f3870001, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T04:57:03,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/32beccd76b35,34303,1733201822340 2024-12-03T04:57:03,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T04:57:03,441 WARN [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T04:57:03,442 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=32beccd76b35,34303,1733201822340 2024-12-03T04:57:03,444 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-03T04:57:03,446 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-03T04:57:03,508 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/hbase.id] with ID: 2fd9c88d-1ce8-49ff-8d1d-416d55232398 2024-12-03T04:57:03,508 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/.tmp/hbase.id 2024-12-03T04:57:03,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741826_1002 (size=42) 2024-12-03T04:57:03,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741826_1002 (size=42) 2024-12-03T04:57:03,536 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/.tmp/hbase.id]:[hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/hbase.id] 2024-12-03T04:57:03,593 INFO [master/32beccd76b35:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:57:03,597 INFO [master/32beccd76b35:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from 
the filesystem. 2024-12-03T04:57:03,619 INFO [master/32beccd76b35:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 20ms. 2024-12-03T04:57:03,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:57:03,626 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43303-0x1015173f3870001, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:57:03,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741827_1003 (size=196) 2024-12-03T04:57:03,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741827_1003 (size=196) 2024-12-03T04:57:03,675 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T04:57:03,677 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T04:57:03,685 INFO [master/32beccd76b35:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T04:57:03,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741828_1004 (size=1189) 2024-12-03T04:57:03,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741828_1004 (size=1189) 2024-12-03T04:57:03,857 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => 
{METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store 2024-12-03T04:57:03,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741829_1005 (size=34) 2024-12-03T04:57:03,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741829_1005 (size=34) 2024-12-03T04:57:03,908 INFO [master/32beccd76b35:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-03T04:57:03,911 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T04:57:03,913 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T04:57:03,913 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T04:57:03,913 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T04:57:03,915 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T04:57:03,915 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T04:57:03,915 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T04:57:03,917 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733201823912Disabling compacts and flushes for region at 1733201823912Disabling writes for close at 1733201823915 (+3 ms)Writing region close event to WAL at 1733201823915Closed at 1733201823915 2024-12-03T04:57:03,922 WARN [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store/.initializing 2024-12-03T04:57:03,922 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/WALs/32beccd76b35,34303,1733201822340 2024-12-03T04:57:03,959 INFO [master/32beccd76b35:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32beccd76b35%2C34303%2C1733201822340, suffix=, logDir=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/WALs/32beccd76b35,34303,1733201822340, archiveDir=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/oldWALs, maxLogs=10 2024-12-03T04:57:03,971 INFO [master/32beccd76b35:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C34303%2C1733201822340.1733201823965 2024-12-03T04:57:04,009 INFO [master/32beccd76b35:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/WALs/32beccd76b35,34303,1733201822340/32beccd76b35%2C34303%2C1733201822340.1733201823965 2024-12-03T04:57:04,030 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36489:36489),(127.0.0.1/127.0.0.1:46277:46277)] 2024-12-03T04:57:04,032 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T04:57:04,032 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T04:57:04,037 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:57:04,038 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:57:04,094 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:57:04,130 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T04:57:04,135 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:57:04,139 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:57:04,140 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:57:04,146 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T04:57:04,146 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:57:04,148 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T04:57:04,148 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:57:04,152 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T04:57:04,152 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:57:04,154 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T04:57:04,154 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:57:04,157 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T04:57:04,157 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:57:04,158 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T04:57:04,159 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:57:04,166 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:57:04,168 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:57:04,174 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:57:04,175 DEBUG [master/32beccd76b35:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:57:04,179 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T04:57:04,184 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:57:04,192 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T04:57:04,193 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=753508, jitterRate=-0.04186582565307617}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T04:57:04,201 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733201824057Initializing all the Stores at 1733201824060 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201824061 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733201824061Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733201824062 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733201824062Cleaning up temporary data from old regions at 1733201824175 (+113 ms)Region opened successfully at 1733201824201 (+26 ms) 2024-12-03T04:57:04,203 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T04:57:04,249 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e8b043b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=32beccd76b35/172.17.0.3:0 2024-12-03T04:57:04,293 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-03T04:57:04,309 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T04:57:04,310 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T04:57:04,314 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T04:57:04,316 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-03T04:57:04,322 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 6 msec 2024-12-03T04:57:04,322 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T04:57:04,362 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T04:57:04,374 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T04:57:04,377 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T04:57:04,381 INFO [master/32beccd76b35:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T04:57:04,383 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T04:57:04,384 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T04:57:04,388 INFO [master/32beccd76b35:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T04:57:04,392 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T04:57:04,394 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T04:57:04,396 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T04:57:04,398 
DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T04:57:04,419 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T04:57:04,421 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T04:57:04,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T04:57:04,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43303-0x1015173f3870001, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T04:57:04,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:57:04,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43303-0x1015173f3870001, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:57:04,431 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=32beccd76b35,34303,1733201822340, sessionid=0x1015173f3870000, setting cluster-up flag (Was=false) 2024-12-03T04:57:04,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:57:04,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43303-0x1015173f3870001, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:57:04,472 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T04:57:04,475 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=32beccd76b35,34303,1733201822340 2024-12-03T04:57:04,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43303-0x1015173f3870001, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:57:04,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:57:04,492 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T04:57:04,494 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=32beccd76b35,34303,1733201822340 2024-12-03T04:57:04,501 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T04:57:04,585 INFO [RS:0;32beccd76b35:43303 {}] regionserver.HRegionServer(746): ClusterId : 2fd9c88d-1ce8-49ff-8d1d-416d55232398 2024-12-03T04:57:04,590 DEBUG [RS:0;32beccd76b35:43303 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T04:57:04,597 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T04:57:04,599 DEBUG [RS:0;32beccd76b35:43303 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T04:57:04,599 DEBUG [RS:0;32beccd76b35:43303 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T04:57:04,603 DEBUG [RS:0;32beccd76b35:43303 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T04:57:04,604 DEBUG [RS:0;32beccd76b35:43303 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26e004a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=32beccd76b35/172.17.0.3:0 2024-12-03T04:57:04,610 INFO [master/32beccd76b35:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T04:57:04,623 INFO [master/32beccd76b35:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-03T04:57:04,627 DEBUG [RS:0;32beccd76b35:43303 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;32beccd76b35:43303 2024-12-03T04:57:04,631 INFO [RS:0;32beccd76b35:43303 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T04:57:04,631 INFO [RS:0;32beccd76b35:43303 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T04:57:04,629 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 32beccd76b35,34303,1733201822340 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T04:57:04,632 DEBUG [RS:0;32beccd76b35:43303 {}] regionserver.HRegionServer(832): About to register with Master. 
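The repeated DEBUG lines above ("Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)" and the matching messages for /hbase/normalizer, /hbase/switch/split, /hbase/switch/merge and /hbase/snapshot-cleanup) are plain existence probes against the quorum at 127.0.0.1:63841. A minimal sketch of the same "absent node is normal" pattern with the stock ZooKeeper client follows; the path and quorum address are copied from the log, everything else is illustrative.

import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZnodeProbeSketch {
  public static void main(String[] args) throws Exception {
    // Quorum address as reported in the log; the session timeout is arbitrary here.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:63841", 30_000, event -> { });
    // exists() returns null when the znode is missing, which is exactly the
    // "node does not exist (not necessarily an error)" case the master logs.
    Stat stat = zk.exists("/hbase/balancer", false);
    System.out.println(stat == null ? "/hbase/balancer absent" : "/hbase/balancer present");
    zk.close();
  }
}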
2024-12-03T04:57:04,635 INFO [RS:0;32beccd76b35:43303 {}] regionserver.HRegionServer(2659): reportForDuty to master=32beccd76b35,34303,1733201822340 with port=43303, startcode=1733201823270 2024-12-03T04:57:04,643 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T04:57:04,643 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T04:57:04,644 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T04:57:04,644 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T04:57:04,644 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/32beccd76b35:0, corePoolSize=10, maxPoolSize=10 2024-12-03T04:57:04,644 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:57:04,644 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/32beccd76b35:0, corePoolSize=2, maxPoolSize=2 2024-12-03T04:57:04,645 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:57:04,647 DEBUG [RS:0;32beccd76b35:43303 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T04:57:04,651 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T04:57:04,652 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T04:57:04,661 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:57:04,662 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 
'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T04:57:04,665 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733201854665 2024-12-03T04:57:04,667 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T04:57:04,669 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T04:57:04,672 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T04:57:04,673 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T04:57:04,673 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T04:57:04,673 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T04:57:04,684 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-03T04:57:04,691 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T04:57:04,693 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T04:57:04,693 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T04:57:04,702 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T04:57:04,702 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T04:57:04,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741831_1007 (size=1321) 2024-12-03T04:57:04,705 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.large.0-1733201824704,5,FailOnTimeoutGroup] 2024-12-03T04:57:04,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741831_1007 (size=1321) 2024-12-03T04:57:04,707 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.small.0-1733201824706,5,FailOnTimeoutGroup] 2024-12-03T04:57:04,707 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T04:57:04,707 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
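Two tunables surface in the cleaner-chore lines above: the storeFileRefCount recovery threshold that HMaster reports as disabled, and the TTL the TimeToLiveLogCleaner applies to archived WALs. A hedged configuration sketch follows; the first key is copied verbatim from the log, the second is the standard log-cleaner TTL key, and both values are purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CleanerTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Key taken from the HMaster log line above; any value > 0 enables the
    // "reopen regions with very high storeFileRefCount" safeguard.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
    // How long archived WALs survive before TimeToLiveLogCleaner removes them
    // (illustrative value; the cleaner chore itself runs on the 600000 ms period logged above).
    conf.setLong("hbase.master.logcleaner.ttl", 10L * 60L * 1000L);
    return conf;
  }
}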
2024-12-03T04:57:04,708 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T04:57:04,709 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4 2024-12-03T04:57:04,709 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T04:57:04,709 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-03T04:57:04,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741832_1008 (size=32) 2024-12-03T04:57:04,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741832_1008 (size=32) 2024-12-03T04:57:04,753 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T04:57:04,755 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55337, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T04:57:04,756 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T04:57:04,760 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T04:57:04,760 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:57:04,761 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:57:04,762 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T04:57:04,763 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34303 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 32beccd76b35,43303,1733201823270 2024-12-03T04:57:04,766 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T04:57:04,766 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34303 {}] master.ServerManager(517): Registering regionserver=32beccd76b35,43303,1733201823270 2024-12-03T04:57:04,766 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:57:04,767 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:57:04,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T04:57:04,771 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T04:57:04,771 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:57:04,772 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:57:04,775 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T04:57:04,778 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T04:57:04,778 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:57:04,779 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:57:04,779 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T04:57:04,781 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/hbase/meta/1588230740 2024-12-03T04:57:04,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/hbase/meta/1588230740 2024-12-03T04:57:04,785 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T04:57:04,785 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T04:57:04,787 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T04:57:04,790 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T04:57:04,793 DEBUG [RS:0;32beccd76b35:43303 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4 2024-12-03T04:57:04,793 DEBUG [RS:0;32beccd76b35:43303 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44433 2024-12-03T04:57:04,793 DEBUG [RS:0;32beccd76b35:43303 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T04:57:04,795 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T04:57:04,796 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=703354, jitterRate=-0.10563987493515015}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T04:57:04,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T04:57:04,800 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733201824753Initializing all the Stores at 1733201824756 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201824756Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING 
=> 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201824756Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733201824756Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201824756Cleaning up temporary data from old regions at 1733201824785 (+29 ms)Region opened successfully at 1733201824800 (+15 ms) 2024-12-03T04:57:04,801 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T04:57:04,801 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T04:57:04,801 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T04:57:04,801 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T04:57:04,801 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T04:57:04,802 DEBUG [RS:0;32beccd76b35:43303 {}] zookeeper.ZKUtil(111): regionserver:43303-0x1015173f3870001, quorum=127.0.0.1:63841, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/32beccd76b35,43303,1733201823270 2024-12-03T04:57:04,802 WARN [RS:0;32beccd76b35:43303 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-03T04:57:04,803 INFO [RS:0;32beccd76b35:43303 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T04:57:04,803 DEBUG [RS:0;32beccd76b35:43303 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270 2024-12-03T04:57:04,808 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T04:57:04,808 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733201824800Disabling compacts and flushes for region at 1733201824800Disabling writes for close at 1733201824801 (+1 ms)Writing region close event to WAL at 1733201824807 (+6 ms)Closed at 1733201824808 (+1 ms) 2024-12-03T04:57:04,812 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [32beccd76b35,43303,1733201823270] 2024-12-03T04:57:04,813 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T04:57:04,813 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T04:57:04,822 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T04:57:04,835 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T04:57:04,838 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T04:57:04,851 INFO [RS:0;32beccd76b35:43303 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T04:57:04,867 INFO [RS:0;32beccd76b35:43303 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T04:57:04,873 INFO [RS:0;32beccd76b35:43303 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T04:57:04,874 INFO [RS:0;32beccd76b35:43303 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T04:57:04,886 INFO [RS:0;32beccd76b35:43303 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T04:57:04,894 INFO [RS:0;32beccd76b35:43303 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T04:57:04,896 INFO [RS:0;32beccd76b35:43303 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
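The region server lines above report globalMemStoreLimit=880 M with a low-water mark of 836 M, and a PressureAwareCompactionThroughputController bounded at 50.00-100.00 MB/second. Those figures are derived from heap size and configuration rather than set directly; the sketch below spells out the keys and stock default values that appear to produce them. Treat the key names as assumptions to verify against your HBase version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class FlushAndCompactionTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the RS heap shared by all memstores; the 880 M / 836 M figures
    // in the log are this fraction (and its lower limit) applied to the test JVM heap.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    // Bounds consumed by PressureAwareCompactionThroughputController; the log's
    // "higher bound: 100.00 MB/second, lower bound 50.00 MB/second" match these defaults.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    return conf;
  }
}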
2024-12-03T04:57:04,897 DEBUG [RS:0;32beccd76b35:43303 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:57:04,897 DEBUG [RS:0;32beccd76b35:43303 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:57:04,897 DEBUG [RS:0;32beccd76b35:43303 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:57:04,898 DEBUG [RS:0;32beccd76b35:43303 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:57:04,898 DEBUG [RS:0;32beccd76b35:43303 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:57:04,898 DEBUG [RS:0;32beccd76b35:43303 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/32beccd76b35:0, corePoolSize=2, maxPoolSize=2 2024-12-03T04:57:04,898 DEBUG [RS:0;32beccd76b35:43303 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:57:04,899 DEBUG [RS:0;32beccd76b35:43303 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:57:04,899 DEBUG [RS:0;32beccd76b35:43303 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:57:04,899 DEBUG [RS:0;32beccd76b35:43303 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:57:04,899 DEBUG [RS:0;32beccd76b35:43303 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:57:04,900 DEBUG [RS:0;32beccd76b35:43303 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:57:04,900 DEBUG [RS:0;32beccd76b35:43303 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/32beccd76b35:0, corePoolSize=3, maxPoolSize=3 2024-12-03T04:57:04,900 DEBUG [RS:0;32beccd76b35:43303 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0, corePoolSize=3, maxPoolSize=3 2024-12-03T04:57:04,902 INFO [RS:0;32beccd76b35:43303 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T04:57:04,902 INFO [RS:0;32beccd76b35:43303 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T04:57:04,902 INFO [RS:0;32beccd76b35:43303 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T04:57:04,902 INFO [RS:0;32beccd76b35:43303 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-03T04:57:04,902 INFO [RS:0;32beccd76b35:43303 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T04:57:04,902 INFO [RS:0;32beccd76b35:43303 {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,43303,1733201823270-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T04:57:04,923 INFO [RS:0;32beccd76b35:43303 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T04:57:04,925 INFO [RS:0;32beccd76b35:43303 {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,43303,1733201823270-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T04:57:04,925 INFO [RS:0;32beccd76b35:43303 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T04:57:04,926 INFO [RS:0;32beccd76b35:43303 {}] regionserver.Replication(171): 32beccd76b35,43303,1733201823270 started 2024-12-03T04:57:04,946 INFO [RS:0;32beccd76b35:43303 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T04:57:04,947 INFO [RS:0;32beccd76b35:43303 {}] regionserver.HRegionServer(1482): Serving as 32beccd76b35,43303,1733201823270, RpcServer on 32beccd76b35/172.17.0.3:43303, sessionid=0x1015173f3870001 2024-12-03T04:57:04,948 DEBUG [RS:0;32beccd76b35:43303 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T04:57:04,948 DEBUG [RS:0;32beccd76b35:43303 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 32beccd76b35,43303,1733201823270 2024-12-03T04:57:04,948 DEBUG [RS:0;32beccd76b35:43303 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '32beccd76b35,43303,1733201823270' 2024-12-03T04:57:04,948 DEBUG [RS:0;32beccd76b35:43303 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T04:57:04,949 DEBUG [RS:0;32beccd76b35:43303 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T04:57:04,950 DEBUG [RS:0;32beccd76b35:43303 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T04:57:04,950 DEBUG [RS:0;32beccd76b35:43303 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T04:57:04,950 DEBUG [RS:0;32beccd76b35:43303 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 32beccd76b35,43303,1733201823270 2024-12-03T04:57:04,950 DEBUG [RS:0;32beccd76b35:43303 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '32beccd76b35,43303,1733201823270' 2024-12-03T04:57:04,950 DEBUG [RS:0;32beccd76b35:43303 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T04:57:04,951 DEBUG [RS:0;32beccd76b35:43303 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T04:57:04,952 DEBUG [RS:0;32beccd76b35:43303 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T04:57:04,952 INFO [RS:0;32beccd76b35:43303 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T04:57:04,952 INFO [RS:0;32beccd76b35:43303 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-03T04:57:04,990 WARN [32beccd76b35:34303 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-03T04:57:05,074 INFO [RS:0;32beccd76b35:43303 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32beccd76b35%2C43303%2C1733201823270, suffix=, logDir=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270, archiveDir=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/oldWALs, maxLogs=32 2024-12-03T04:57:05,078 INFO [RS:0;32beccd76b35:43303 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C43303%2C1733201823270.1733201825077 2024-12-03T04:57:05,132 INFO [RS:0;32beccd76b35:43303 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201825077 2024-12-03T04:57:05,153 DEBUG [RS:0;32beccd76b35:43303 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46277:46277),(127.0.0.1/127.0.0.1:36489:36489)] 2024-12-03T04:57:05,243 DEBUG [32beccd76b35:34303 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-03T04:57:05,271 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=32beccd76b35,43303,1733201823270 2024-12-03T04:57:05,295 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 32beccd76b35,43303,1733201823270, state=OPENING 2024-12-03T04:57:05,303 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T04:57:05,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:57:05,307 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T04:57:05,309 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43303-0x1015173f3870001, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:57:05,309 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T04:57:05,310 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T04:57:05,312 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=32beccd76b35,43303,1733201823270}] 2024-12-03T04:57:05,509 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T04:57:05,516 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34481, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T04:57:05,531 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T04:57:05,531 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T04:57:05,536 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32beccd76b35%2C43303%2C1733201823270.meta, suffix=.meta, logDir=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270, archiveDir=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/oldWALs, maxLogs=32 2024-12-03T04:57:05,539 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C43303%2C1733201823270.meta.1733201825538.meta 2024-12-03T04:57:05,558 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.meta.1733201825538.meta 2024-12-03T04:57:05,565 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36489:36489),(127.0.0.1/127.0.0.1:46277:46277)] 2024-12-03T04:57:05,567 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T04:57:05,570 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T04:57:05,574 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T04:57:05,580 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-03T04:57:05,586 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T04:57:05,587 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T04:57:05,587 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T04:57:05,587 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T04:57:05,592 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T04:57:05,594 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T04:57:05,594 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:57:05,595 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:57:05,595 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T04:57:05,598 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T04:57:05,598 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:57:05,599 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:57:05,599 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T04:57:05,601 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T04:57:05,601 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:57:05,602 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:57:05,602 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T04:57:05,604 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T04:57:05,604 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:57:05,605 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-03T04:57:05,605 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T04:57:05,607 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/hbase/meta/1588230740 2024-12-03T04:57:05,610 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/hbase/meta/1588230740 2024-12-03T04:57:05,614 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T04:57:05,615 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T04:57:05,616 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T04:57:05,619 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T04:57:05,621 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=800869, jitterRate=0.018358826637268066}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T04:57:05,622 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T04:57:05,623 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733201825588Writing region info on filesystem at 1733201825588Initializing all the Stores at 1733201825591 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201825591Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201825592 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733201825592Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201825592Cleaning up temporary data from old regions at 1733201825615 (+23 ms)Running coprocessor post-open hooks at 1733201825622 (+7 ms)Region opened successfully at 1733201825623 (+1 ms) 2024-12-03T04:57:05,633 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733201825496 2024-12-03T04:57:05,660 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=32beccd76b35,43303,1733201823270 2024-12-03T04:57:05,663 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 32beccd76b35,43303,1733201823270, state=OPEN 2024-12-03T04:57:05,665 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T04:57:05,666 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T04:57:05,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T04:57:05,672 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T04:57:05,672 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=32beccd76b35,43303,1733201823270 2024-12-03T04:57:05,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43303-0x1015173f3870001, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T04:57:05,674 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T04:57:05,680 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T04:57:05,680 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=32beccd76b35,43303,1733201823270 in 361 msec 2024-12-03T04:57:05,695 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T04:57:05,695 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 860 msec 2024-12-03T04:57:05,698 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T04:57:05,699 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T04:57:05,728 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T04:57:05,730 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=32beccd76b35,43303,1733201823270, seqNum=-1] 2024-12-03T04:57:05,757 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T04:57:05,759 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42809, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T04:57:05,784 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.2460 sec 2024-12-03T04:57:05,784 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733201825784, completionTime=-1 2024-12-03T04:57:05,786 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-03T04:57:05,787 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-03T04:57:05,815 INFO [master/32beccd76b35:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-03T04:57:05,815 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733201885815 2024-12-03T04:57:05,816 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733201945816 2024-12-03T04:57:05,816 INFO [master/32beccd76b35:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 29 msec 2024-12-03T04:57:05,819 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,34303,1733201822340-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T04:57:05,819 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,34303,1733201822340-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T04:57:05,820 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,34303,1733201822340-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T04:57:05,821 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-32beccd76b35:34303, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T04:57:05,822 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T04:57:05,824 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-03T04:57:05,828 DEBUG [master/32beccd76b35:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T04:57:05,875 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.433sec 2024-12-03T04:57:05,878 INFO [master/32beccd76b35:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T04:57:05,880 INFO [master/32beccd76b35:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T04:57:05,881 INFO [master/32beccd76b35:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T04:57:05,885 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-03T04:57:05,885 INFO [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T04:57:05,890 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,34303,1733201822340-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T04:57:05,891 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,34303,1733201822340-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T04:57:05,892 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@156df74a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T04:57:05,895 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-03T04:57:05,895 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-03T04:57:05,900 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 32beccd76b35,34303,-1 for getting cluster id 2024-12-03T04:57:05,904 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T04:57:05,911 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T04:57:05,912 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T04:57:05,912 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,34303,1733201822340-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T04:57:05,916 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2fd9c88d-1ce8-49ff-8d1d-416d55232398' 2024-12-03T04:57:05,920 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T04:57:05,920 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2fd9c88d-1ce8-49ff-8d1d-416d55232398" 2024-12-03T04:57:05,923 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@711a6ca1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T04:57:05,924 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [32beccd76b35,34303,-1] 2024-12-03T04:57:05,927 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T04:57:05,930 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T04:57:05,931 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45172, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T04:57:05,935 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d2c6b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T04:57:05,935 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T04:57:05,943 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=32beccd76b35,43303,1733201823270, seqNum=-1] 2024-12-03T04:57:05,944 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T04:57:05,946 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37628, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T04:57:06,003 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=32beccd76b35,34303,1733201822340 2024-12-03T04:57:06,004 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:57:06,025 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-03T04:57:06,032 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-03T04:57:06,038 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 32beccd76b35,34303,1733201822340 2024-12-03T04:57:06,048 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@12da07e6 2024-12-03T04:57:06,050 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T04:57:06,052 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45178, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T04:57:06,055 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34303 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-03T04:57:06,055 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34303 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-03T04:57:06,060 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34303 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T04:57:06,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34303 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-03T04:57:06,074 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T04:57:06,077 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34303 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-12-03T04:57:06,077 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:57:06,080 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T04:57:06,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34303 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T04:57:06,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741835_1011 (size=389) 2024-12-03T04:57:06,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741835_1011 (size=389) 2024-12-03T04:57:06,162 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7b4f7fa158df258d504886f3e9969c39, NAME => 
'TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4 2024-12-03T04:57:06,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741836_1012 (size=72) 2024-12-03T04:57:06,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741836_1012 (size=72) 2024-12-03T04:57:06,600 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T04:57:06,600 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 7b4f7fa158df258d504886f3e9969c39, disabling compactions & flushes 2024-12-03T04:57:06,601 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39. 2024-12-03T04:57:06,601 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39. 2024-12-03T04:57:06,601 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39. after waiting 0 ms 2024-12-03T04:57:06,601 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39. 2024-12-03T04:57:06,601 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39. 
2024-12-03T04:57:06,601 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7b4f7fa158df258d504886f3e9969c39: Waiting for close lock at 1733201826600Disabling compacts and flushes for region at 1733201826600Disabling writes for close at 1733201826601 (+1 ms)Writing region close event to WAL at 1733201826601Closed at 1733201826601 2024-12-03T04:57:06,607 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T04:57:06,614 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733201826607"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733201826607"}]},"ts":"1733201826607"} 2024-12-03T04:57:06,624 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-03T04:57:06,627 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T04:57:06,632 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733201826627"}]},"ts":"1733201826627"} 2024-12-03T04:57:06,639 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-03T04:57:06,642 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=7b4f7fa158df258d504886f3e9969c39, ASSIGN}] 2024-12-03T04:57:06,646 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=7b4f7fa158df258d504886f3e9969c39, ASSIGN 2024-12-03T04:57:06,648 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=7b4f7fa158df258d504886f3e9969c39, ASSIGN; state=OFFLINE, location=32beccd76b35,43303,1733201823270; forceNewPlan=false, retain=false 2024-12-03T04:57:06,801 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7b4f7fa158df258d504886f3e9969c39, regionState=OPENING, regionLocation=32beccd76b35,43303,1733201823270 2024-12-03T04:57:06,809 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=7b4f7fa158df258d504886f3e9969c39, ASSIGN because future has completed 2024-12-03T04:57:06,811 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, 
state=RUNNABLE, hasLock=false; OpenRegionProcedure 7b4f7fa158df258d504886f3e9969c39, server=32beccd76b35,43303,1733201823270}] 2024-12-03T04:57:06,977 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39. 2024-12-03T04:57:06,978 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 7b4f7fa158df258d504886f3e9969c39, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39.', STARTKEY => '', ENDKEY => ''} 2024-12-03T04:57:06,978 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 7b4f7fa158df258d504886f3e9969c39 2024-12-03T04:57:06,979 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T04:57:06,979 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 7b4f7fa158df258d504886f3e9969c39 2024-12-03T04:57:06,979 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 7b4f7fa158df258d504886f3e9969c39 2024-12-03T04:57:06,982 INFO [StoreOpener-7b4f7fa158df258d504886f3e9969c39-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 7b4f7fa158df258d504886f3e9969c39 2024-12-03T04:57:06,984 INFO [StoreOpener-7b4f7fa158df258d504886f3e9969c39-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7b4f7fa158df258d504886f3e9969c39 columnFamilyName info 2024-12-03T04:57:06,984 DEBUG [StoreOpener-7b4f7fa158df258d504886f3e9969c39-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:57:06,985 INFO [StoreOpener-7b4f7fa158df258d504886f3e9969c39-1 {}] regionserver.HStore(327): Store=7b4f7fa158df258d504886f3e9969c39/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T04:57:06,985 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegion(1038): replaying wal for 7b4f7fa158df258d504886f3e9969c39 2024-12-03T04:57:06,986 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39 2024-12-03T04:57:06,987 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39 2024-12-03T04:57:06,990 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 7b4f7fa158df258d504886f3e9969c39 2024-12-03T04:57:06,990 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 7b4f7fa158df258d504886f3e9969c39 2024-12-03T04:57:06,993 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 7b4f7fa158df258d504886f3e9969c39 2024-12-03T04:57:07,001 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T04:57:07,002 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 7b4f7fa158df258d504886f3e9969c39; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=766395, jitterRate=-0.02547873556613922}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T04:57:07,003 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7b4f7fa158df258d504886f3e9969c39 2024-12-03T04:57:07,004 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 7b4f7fa158df258d504886f3e9969c39: Running coprocessor pre-open hook at 1733201826979Writing region info on filesystem at 1733201826979Initializing all the Stores at 1733201826981 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733201826981Cleaning up temporary data from old regions at 1733201826990 (+9 ms)Running coprocessor post-open hooks at 1733201827003 (+13 ms)Region opened successfully at 1733201827004 (+1 ms) 2024-12-03T04:57:07,007 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39., pid=6, 
masterSystemTime=1733201826966 2024-12-03T04:57:07,011 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39. 2024-12-03T04:57:07,011 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39. 2024-12-03T04:57:07,012 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7b4f7fa158df258d504886f3e9969c39, regionState=OPEN, openSeqNum=2, regionLocation=32beccd76b35,43303,1733201823270 2024-12-03T04:57:07,017 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7b4f7fa158df258d504886f3e9969c39, server=32beccd76b35,43303,1733201823270 because future has completed 2024-12-03T04:57:07,023 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-03T04:57:07,023 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 7b4f7fa158df258d504886f3e9969c39, server=32beccd76b35,43303,1733201823270 in 208 msec 2024-12-03T04:57:07,029 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-03T04:57:07,029 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=7b4f7fa158df258d504886f3e9969c39, ASSIGN in 382 msec 2024-12-03T04:57:07,031 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T04:57:07,031 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733201827031"}]},"ts":"1733201827031"} 2024-12-03T04:57:07,035 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-03T04:57:07,037 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T04:57:07,043 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 974 msec 2024-12-03T04:57:11,094 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-03T04:57:11,140 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-03T04:57:11,141 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-03T04:57:12,834 DEBUG [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-03T04:57:12,834 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-03T04:57:12,837 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-03T04:57:12,837 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-03T04:57:12,838 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T04:57:12,838 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-03T04:57:12,839 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-03T04:57:12,839 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-03T04:57:16,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34303 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T04:57:16,101 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-12-03T04:57:16,104 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-12-03T04:57:16,112 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-03T04:57:16,112 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39. 
2024-12-03T04:57:16,114 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C43303%2C1733201823270.1733201836113 2024-12-03T04:57:16,141 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:57:16,141 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:57:16,141 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:57:16,141 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:57:16,142 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:57:16,142 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201825077 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201836113 2024-12-03T04:57:16,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741833_1009 (size=451) 2024-12-03T04:57:16,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741833_1009 (size=451) 2024-12-03T04:57:16,149 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46277:46277),(127.0.0.1/127.0.0.1:36489:36489)] 2024-12-03T04:57:16,150 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201825077 to hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/oldWALs/32beccd76b35%2C43303%2C1733201823270.1733201825077 2024-12-03T04:57:16,160 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39., hostname=32beccd76b35,43303,1733201823270, seqNum=2] 2024-12-03T04:57:28,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43303 {}] regionserver.HRegion(8855): Flush requested on 7b4f7fa158df258d504886f3e9969c39 2024-12-03T04:57:28,218 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7b4f7fa158df258d504886f3e9969c39 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T04:57:28,333 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/.tmp/info/b0815949d8f7411782cf0a61b569ec8d is 1080, key is row0001/info:/1733201836163/Put/seqid=0 2024-12-03T04:57:28,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741838_1014 (size=12509) 2024-12-03T04:57:28,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741838_1014 (size=12509) 2024-12-03T04:57:28,374 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/.tmp/info/b0815949d8f7411782cf0a61b569ec8d 2024-12-03T04:57:28,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/.tmp/info/b0815949d8f7411782cf0a61b569ec8d as hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/b0815949d8f7411782cf0a61b569ec8d 2024-12-03T04:57:28,460 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/b0815949d8f7411782cf0a61b569ec8d, entries=7, sequenceid=11, filesize=12.2 K 2024-12-03T04:57:28,474 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 7b4f7fa158df258d504886f3e9969c39 in 251ms, sequenceid=11, compaction requested=false 2024-12-03T04:57:28,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7b4f7fa158df258d504886f3e9969c39: 2024-12-03T04:57:31,325 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T04:57:36,232 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C43303%2C1733201823270.1733201856231 2024-12-03T04:57:36,440 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK], DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK]] 2024-12-03T04:57:36,440 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:57:36,440 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:57:36,441 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:57:36,441 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:57:36,441 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:57:36,441 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201836113 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201856231 2024-12-03T04:57:36,442 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46277:46277),(127.0.0.1/127.0.0.1:36489:36489)] 2024-12-03T04:57:36,442 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201836113 is not closed yet, will try archiving it next time 2024-12-03T04:57:36,444 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741837_1013 (size=12399) 2024-12-03T04:57:36,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741837_1013 (size=12399) 2024-12-03T04:57:36,647 INFO [FSHLog-0-hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4-prefix:32beccd76b35,43303,1733201823270 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK], DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK]] 2024-12-03T04:57:38,851 INFO [FSHLog-0-hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4-prefix:32beccd76b35,43303,1733201823270 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK], DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK]] 2024-12-03T04:57:41,058 INFO [FSHLog-0-hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4-prefix:32beccd76b35,43303,1733201823270 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK], DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK]] 2024-12-03T04:57:43,263 INFO [FSHLog-0-hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4-prefix:32beccd76b35,43303,1733201823270 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK], DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK]] 2024-12-03T04:57:43,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43303 {}] regionserver.HRegion(8855): Flush requested on 7b4f7fa158df258d504886f3e9969c39 2024-12-03T04:57:43,264 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7b4f7fa158df258d504886f3e9969c39 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T04:57:43,466 INFO [FSHLog-0-hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4-prefix:32beccd76b35,43303,1733201823270 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK], DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK]] 2024-12-03T04:57:43,474 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/.tmp/info/bc4415cdf3ff40ef8111757ecf660f47 is 1080, key is row0008/info:/1733201850219/Put/seqid=0 2024-12-03T04:57:43,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741840_1016 (size=12509) 2024-12-03T04:57:43,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741840_1016 (size=12509) 2024-12-03T04:57:43,494 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at 
sequenceid=21 (bloomFilter=true), to=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/.tmp/info/bc4415cdf3ff40ef8111757ecf660f47 2024-12-03T04:57:43,513 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/.tmp/info/bc4415cdf3ff40ef8111757ecf660f47 as hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/bc4415cdf3ff40ef8111757ecf660f47 2024-12-03T04:57:43,527 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/bc4415cdf3ff40ef8111757ecf660f47, entries=7, sequenceid=21, filesize=12.2 K 2024-12-03T04:57:43,730 INFO [FSHLog-0-hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4-prefix:32beccd76b35,43303,1733201823270 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK], DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK]] 2024-12-03T04:57:43,730 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 7b4f7fa158df258d504886f3e9969c39 in 466ms, sequenceid=21, compaction requested=false 2024-12-03T04:57:43,730 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7b4f7fa158df258d504886f3e9969c39: 2024-12-03T04:57:43,731 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-12-03T04:57:43,731 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T04:57:43,732 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/b0815949d8f7411782cf0a61b569ec8d because midkey is the same as first or last row 2024-12-03T04:57:45,470 INFO [FSHLog-0-hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4-prefix:32beccd76b35,43303,1733201823270 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK], DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK]] 2024-12-03T04:57:45,925 INFO [master/32beccd76b35:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-03T04:57:45,925 INFO [master/32beccd76b35:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
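
[Editor's note] The flusher entries above record the split decision: the summed store size (24.4 K) exceeds sizeToCheck (16.0 K), yet the region still cannot be split because the candidate midkey equals the first or last row. The following is a rough, self-contained sketch of that two-part check; the values are taken from the log, but the class is illustrative and not the ConstantSizeRegionSplitPolicy source.

import java.util.Arrays;

// Illustrative size/midkey split check; names and thresholds are assumptions,
// not copied from HBase's split policy implementations.
public class SplitCheck {

    static boolean shouldSplit(long sumStoreSizeBytes, long sizeToCheckBytes) {
        // Size test: the region is "big enough" once its stores exceed the threshold.
        return sumStoreSizeBytes > sizeToCheckBytes;
    }

    static boolean canSplitAt(byte[] midKey, byte[] firstKey, byte[] lastKey) {
        // Even a big region is not split when the midkey equals the first or last
        // row, because one daughter region would end up empty.
        return !Arrays.equals(midKey, firstKey) && !Arrays.equals(midKey, lastKey);
    }

    public static void main(String[] args) {
        long sumSize = 25_018;       // ~24.4 K, as in the log
        long sizeToCheck = 16_384;   // 16.0 K
        byte[] mid = "row0001".getBytes();
        byte[] first = "row0001".getBytes();
        byte[] last = "row0015".getBytes();
        System.out.println("big enough: " + shouldSplit(sumSize, sizeToCheck));
        System.out.println("can split:  " + canSplitAt(mid, first, last));
    }
}
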
2024-12-03T04:57:47,679 INFO [FSHLog-0-hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4-prefix:32beccd76b35,43303,1733201823270 {}] wal.AbstractFSWAL(1368): Slow sync cost: 204 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK], DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK]] 2024-12-03T04:57:47,681 WARN [FSHLog-0-hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4-prefix:32beccd76b35,43303,1733201823270 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK], DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK]] 2024-12-03T04:57:47,683 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 32beccd76b35%2C43303%2C1733201823270:(num 1733201856231) roll requested 2024-12-03T04:57:47,684 INFO [regionserver/32beccd76b35:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C43303%2C1733201823270.1733201867683 2024-12-03T04:57:47,902 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 214 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK], DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK]] 2024-12-03T04:57:47,905 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:57:47,905 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:57:47,906 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:57:47,906 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:57:47,909 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:57:47,910 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201856231 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201867683 2024-12-03T04:57:47,913 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36489:36489),(127.0.0.1/127.0.0.1:46277:46277)] 2024-12-03T04:57:47,913 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201856231 is not closed yet, will try archiving it next time 2024-12-03T04:57:47,913 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201836113 to hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/oldWALs/32beccd76b35%2C43303%2C1733201823270.1733201836113 2024-12-03T04:57:47,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741839_1015 (size=7739) 2024-12-03T04:57:47,922 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741839_1015 (size=7739) 2024-12-03T04:57:49,887 INFO [FSHLog-0-hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4-prefix:32beccd76b35,43303,1733201823270 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK], DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK]] 2024-12-03T04:57:51,979 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 7b4f7fa158df258d504886f3e9969c39, had cached 0 bytes from a total of 25018 2024-12-03T04:57:52,092 INFO [FSHLog-0-hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4-prefix:32beccd76b35,43303,1733201823270 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK], DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK]] 2024-12-03T04:57:54,296 INFO [FSHLog-0-hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4-prefix:32beccd76b35,43303,1733201823270 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK], DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK]] 2024-12-03T04:57:56,504 INFO [FSHLog-0-hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4-prefix:32beccd76b35,43303,1733201823270 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK], DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK]] 2024-12-03T04:57:58,506 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T04:57:58,507 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C43303%2C1733201823270.1733201878507 2024-12-03T04:58:01,325 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
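
[Editor's note] The WARN above requested a WAL roll after the slow-sync count exceeded its threshold (count=8, threshold=5), and later entries roll again when a single sync crosses the 5000 ms limit. Below is a small sketch of that accounting under those assumed thresholds; it mirrors figures visible in the log but is illustrative only, not FSHLog.

// Sketch of slow-sync accounting; thresholds mirror figures visible in the log
// (~200 ms syncs counted as slow, 5 per roll period, 5000 ms for an immediate
// roll), but the class itself is illustrative, not HBase's WAL implementation.
public class SlowSyncTracker {

    private final long slowSyncCutoffMs;     // syncs at or above this are counted as "slow" (assumed)
    private final long rollTimeThresholdMs;  // one sync this slow requests a roll by itself
    private final int rollCountThreshold;    // this many slow syncs also requests a roll
    private int slowSyncCount;

    SlowSyncTracker(long slowSyncCutoffMs, long rollTimeThresholdMs, int rollCountThreshold) {
        this.slowSyncCutoffMs = slowSyncCutoffMs;
        this.rollTimeThresholdMs = rollTimeThresholdMs;
        this.rollCountThreshold = rollCountThreshold;
    }

    /** Record one sync cost; true means a log roll should be requested. */
    boolean recordSync(long costMs) {
        if (costMs >= rollTimeThresholdMs) {
            return true;                     // a single very slow sync is enough
        }
        if (costMs >= slowSyncCutoffMs) {
            slowSyncCount++;                 // accumulate moderately slow syncs
        }
        return slowSyncCount > rollCountThreshold;
    }

    /** Called once the WAL has actually been rolled. */
    void reset() {
        slowSyncCount = 0;
    }

    public static void main(String[] args) {
        SlowSyncTracker tracker = new SlowSyncTracker(200, 5000, 5);
        long[] costs = {201, 201, 202, 201, 201, 204, 214, 5006};
        for (long cost : costs) {
            System.out.printf("sync cost %d ms -> request roll? %s%n",
                cost, tracker.recordSync(cost));
        }
    }
}
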
2024-12-03T04:58:03,516 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK], DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK]] 2024-12-03T04:58:03,518 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK], DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK]] 2024-12-03T04:58:03,518 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 32beccd76b35%2C43303%2C1733201823270:(num 1733201878507) roll requested 2024-12-03T04:58:03,518 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:03,518 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:03,518 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:03,519 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:03,519 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:03,519 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201867683 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201878507 2024-12-03T04:58:03,520 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46277:46277),(127.0.0.1/127.0.0.1:36489:36489)] 2024-12-03T04:58:03,520 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201867683 is not closed yet, will try archiving it next time 2024-12-03T04:58:03,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741841_1017 (size=4753) 2024-12-03T04:58:03,522 INFO [regionserver/32beccd76b35:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C43303%2C1733201823270.1733201883521 2024-12-03T04:58:03,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741841_1017 (size=4753) 2024-12-03T04:58:08,525 INFO [FSHLog-0-hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4-prefix:32beccd76b35,43303,1733201823270 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK], DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK]] 2024-12-03T04:58:08,526 WARN [FSHLog-0-hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4-prefix:32beccd76b35,43303,1733201823270 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK], 
DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK]] 2024-12-03T04:58:08,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43303 {}] regionserver.HRegion(8855): Flush requested on 7b4f7fa158df258d504886f3e9969c39 2024-12-03T04:58:08,526 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7b4f7fa158df258d504886f3e9969c39 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T04:58:08,559 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5029 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK], DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK]] 2024-12-03T04:58:08,559 WARN [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5029 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK], DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK]] 2024-12-03T04:58:10,534 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T04:58:13,529 INFO [FSHLog-0-hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4-prefix:32beccd76b35,43303,1733201823270 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK], DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK]] 2024-12-03T04:58:13,529 WARN [FSHLog-0-hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4-prefix:32beccd76b35,43303,1733201823270 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK], DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK]] 2024-12-03T04:58:13,529 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:13,529 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:13,529 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:13,530 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:13,530 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:13,530 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201878507 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201883521 2024-12-03T04:58:13,531 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36489:36489),(127.0.0.1/127.0.0.1:46277:46277)] 2024-12-03T04:58:13,532 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201878507 is not closed yet, will try archiving it next time 2024-12-03T04:58:13,532 
DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 32beccd76b35%2C43303%2C1733201823270:(num 1733201883521) roll requested 2024-12-03T04:58:13,532 INFO [regionserver/32beccd76b35:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C43303%2C1733201823270.1733201893532 2024-12-03T04:58:13,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741842_1018 (size=1569) 2024-12-03T04:58:13,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741842_1018 (size=1569) 2024-12-03T04:58:13,535 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/.tmp/info/a5b028da81444512bea2228ad8e0619a is 1080, key is row0015/info:/1733201865267/Put/seqid=0 2024-12-03T04:58:13,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741844_1020 (size=12509) 2024-12-03T04:58:13,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741844_1020 (size=12509) 2024-12-03T04:58:13,550 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/.tmp/info/a5b028da81444512bea2228ad8e0619a 2024-12-03T04:58:13,561 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/.tmp/info/a5b028da81444512bea2228ad8e0619a as hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/a5b028da81444512bea2228ad8e0619a 2024-12-03T04:58:13,571 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/a5b028da81444512bea2228ad8e0619a, entries=7, sequenceid=31, filesize=12.2 K 2024-12-03T04:58:18,573 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5038 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK], DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK]] 2024-12-03T04:58:18,574 WARN [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5038 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK], DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK]] 2024-12-03T04:58:18,574 INFO [FSHLog-0-hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4-prefix:32beccd76b35,43303,1733201823270 {}] wal.AbstractFSWAL(1368): 
Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK], DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK]] 2024-12-03T04:58:18,574 WARN [FSHLog-0-hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4-prefix:32beccd76b35,43303,1733201823270 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41791,DS-25bf40f5-d003-4c70-bb57-66af91e3226f,DISK], DatanodeInfoWithStorage[127.0.0.1:36129,DS-34ae858c-ccec-4eb8-821a-d3b625271ead,DISK]] 2024-12-03T04:58:18,574 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 7b4f7fa158df258d504886f3e9969c39 in 10048ms, sequenceid=31, compaction requested=true 2024-12-03T04:58:18,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7b4f7fa158df258d504886f3e9969c39: 2024-12-03T04:58:18,575 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-12-03T04:58:18,575 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T04:58:18,575 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/b0815949d8f7411782cf0a61b569ec8d because midkey is the same as first or last row 2024-12-03T04:58:18,577 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b4f7fa158df258d504886f3e9969c39:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T04:58:18,577 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:18,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T04:58:18,581 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:18,582 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:18,585 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:18,585 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:18,586 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201883521 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201893532 2024-12-03T04:58:18,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741843_1019 (size=438) 2024-12-03T04:58:18,589 DEBUG [RS:0;32beccd76b35:43303-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T04:58:18,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is 
added to blk_1073741843_1019 (size=438) 2024-12-03T04:58:18,593 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201856231 to hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/oldWALs/32beccd76b35%2C43303%2C1733201823270.1733201856231 2024-12-03T04:58:18,595 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201867683 to hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/oldWALs/32beccd76b35%2C43303%2C1733201823270.1733201867683 2024-12-03T04:58:18,602 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201878507 to hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/oldWALs/32beccd76b35%2C43303%2C1733201823270.1733201878507 2024-12-03T04:58:18,603 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201883521 to hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/oldWALs/32beccd76b35%2C43303%2C1733201823270.1733201883521 2024-12-03T04:58:18,604 DEBUG [RS:0;32beccd76b35:43303-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T04:58:18,607 DEBUG [RS:0;32beccd76b35:43303-shortCompactions-0 {}] regionserver.HStore(1541): 7b4f7fa158df258d504886f3e9969c39/info is initiating minor compaction (all files) 2024-12-03T04:58:18,607 INFO [RS:0;32beccd76b35:43303-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 7b4f7fa158df258d504886f3e9969c39/info in TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39. 
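
[Editor's note] The WAL-Archive-0 entries above show rolled WALs being moved out of the active WALs directory into oldWALs once they are no longer written to. The snippet below sketches that relocation step with local filesystem stand-ins; the directory layout and helper names are assumptions, not the test's actual HDFS paths or HBase's archiving code.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

// Sketch of the archiving step: a finished WAL keeps its name but is moved
// from the WALs directory into oldWALs.
public class WalArchiver {

    static void archive(Path walsDir, Path oldWalsDir, String walFileName) throws IOException {
        Files.createDirectories(oldWalsDir);
        Path source = walsDir.resolve(walFileName);
        Path target = oldWalsDir.resolve(walFileName);
        Files.move(source, target);   // same file name, new directory
        System.out.println("Archived " + source + " to " + target);
    }

    public static void main(String[] args) throws IOException {
        Path wals = Files.createTempDirectory("WALs");
        Path oldWals = wals.resolveSibling(wals.getFileName() + "-oldWALs");
        String name = "32beccd76b35%2C43303%2C1733201823270.1733201856231";
        Files.createFile(wals.resolve(name));
        archive(wals, oldWals, name);
    }
}
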
2024-12-03T04:58:18,608 INFO [RS:0;32beccd76b35:43303-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/b0815949d8f7411782cf0a61b569ec8d, hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/bc4415cdf3ff40ef8111757ecf660f47, hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/a5b028da81444512bea2228ad8e0619a] into tmpdir=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/.tmp, totalSize=36.6 K 2024-12-03T04:58:18,618 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36489:36489),(127.0.0.1/127.0.0.1:46277:46277)] 2024-12-03T04:58:18,618 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 32beccd76b35%2C43303%2C1733201823270:(num 1733201893532) roll requested 2024-12-03T04:58:18,618 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C43303%2C1733201823270.1733201898618 2024-12-03T04:58:18,621 DEBUG [RS:0;32beccd76b35:43303-shortCompactions-0 {}] compactions.Compactor(225): Compacting b0815949d8f7411782cf0a61b569ec8d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733201836163 2024-12-03T04:58:18,622 DEBUG [RS:0;32beccd76b35:43303-shortCompactions-0 {}] compactions.Compactor(225): Compacting bc4415cdf3ff40ef8111757ecf660f47, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733201850219 2024-12-03T04:58:18,623 DEBUG [RS:0;32beccd76b35:43303-shortCompactions-0 {}] compactions.Compactor(225): Compacting a5b028da81444512bea2228ad8e0619a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733201865267 2024-12-03T04:58:18,674 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:18,674 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:18,674 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:18,674 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:18,675 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:18,675 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201893532 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201898618 2024-12-03T04:58:18,678 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36489:36489),(127.0.0.1/127.0.0.1:46277:46277)] 2024-12-03T04:58:18,678 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): 
hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201893532 is not closed yet, will try archiving it next time 2024-12-03T04:58:18,680 INFO [regionserver/32beccd76b35:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C43303%2C1733201823270.1733201898679 2024-12-03T04:58:18,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741845_1021 (size=93) 2024-12-03T04:58:18,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741845_1021 (size=93) 2024-12-03T04:58:18,709 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201893532 to hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/oldWALs/32beccd76b35%2C43303%2C1733201823270.1733201893532 2024-12-03T04:58:18,715 INFO [RS:0;32beccd76b35:43303-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b4f7fa158df258d504886f3e9969c39#info#compaction#3 average throughput is 2.69 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T04:58:18,717 DEBUG [RS:0;32beccd76b35:43303-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/.tmp/info/701d931e88634fe789931921e3892972 is 1080, key is row0001/info:/1733201836163/Put/seqid=0 2024-12-03T04:58:18,739 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:18,739 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:18,740 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:18,740 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:18,740 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:18,741 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201898618 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/WALs/32beccd76b35,43303,1733201823270/32beccd76b35%2C43303%2C1733201823270.1733201898679 2024-12-03T04:58:18,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741846_1022 (size=1258) 2024-12-03T04:58:18,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741846_1022 (size=1258) 2024-12-03T04:58:18,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741848_1024 (size=27710) 2024-12-03T04:58:18,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741848_1024 (size=27710) 2024-12-03T04:58:18,784 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer 
with pipeline: [(127.0.0.1/127.0.0.1:46277:46277),(127.0.0.1/127.0.0.1:36489:36489)] 2024-12-03T04:58:18,815 DEBUG [RS:0;32beccd76b35:43303-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/.tmp/info/701d931e88634fe789931921e3892972 as hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/701d931e88634fe789931921e3892972 2024-12-03T04:58:18,842 INFO [RS:0;32beccd76b35:43303-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 7b4f7fa158df258d504886f3e9969c39/info of 7b4f7fa158df258d504886f3e9969c39 into 701d931e88634fe789931921e3892972(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T04:58:18,842 DEBUG [RS:0;32beccd76b35:43303-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 7b4f7fa158df258d504886f3e9969c39: 2024-12-03T04:58:18,844 INFO [RS:0;32beccd76b35:43303-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39., storeName=7b4f7fa158df258d504886f3e9969c39/info, priority=13, startTime=1733201898576; duration=0sec 2024-12-03T04:58:18,845 DEBUG [RS:0;32beccd76b35:43303-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-03T04:58:18,845 DEBUG [RS:0;32beccd76b35:43303-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T04:58:18,845 DEBUG [RS:0;32beccd76b35:43303-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/701d931e88634fe789931921e3892972 because midkey is the same as first or last row 2024-12-03T04:58:18,845 DEBUG [RS:0;32beccd76b35:43303-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-03T04:58:18,845 DEBUG [RS:0;32beccd76b35:43303-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T04:58:18,846 DEBUG [RS:0;32beccd76b35:43303-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/701d931e88634fe789931921e3892972 because midkey is the same as first or last row 2024-12-03T04:58:18,846 DEBUG [RS:0;32beccd76b35:43303-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-03T04:58:18,846 DEBUG [RS:0;32beccd76b35:43303-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T04:58:18,846 DEBUG [RS:0;32beccd76b35:43303-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/701d931e88634fe789931921e3892972 because midkey is the same as first or last row 2024-12-03T04:58:18,846 DEBUG [RS:0;32beccd76b35:43303-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T04:58:18,846 DEBUG [RS:0;32beccd76b35:43303-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b4f7fa158df258d504886f3e9969c39:info 2024-12-03T04:58:30,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43303 {}] regionserver.HRegion(8855): Flush requested on 7b4f7fa158df258d504886f3e9969c39 2024-12-03T04:58:30,706 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7b4f7fa158df258d504886f3e9969c39 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T04:58:30,714 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/.tmp/info/d328b68a83a246b29f8e7815f6c0ffbe is 1080, key is row0022/info:/1733201898681/Put/seqid=0 2024-12-03T04:58:30,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741849_1025 (size=12509) 2024-12-03T04:58:30,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741849_1025 (size=12509) 2024-12-03T04:58:30,726 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/.tmp/info/d328b68a83a246b29f8e7815f6c0ffbe 2024-12-03T04:58:30,735 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/.tmp/info/d328b68a83a246b29f8e7815f6c0ffbe as hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/d328b68a83a246b29f8e7815f6c0ffbe 2024-12-03T04:58:30,749 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/d328b68a83a246b29f8e7815f6c0ffbe, entries=7, sequenceid=42, filesize=12.2 K 2024-12-03T04:58:30,750 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 7b4f7fa158df258d504886f3e9969c39 in 44ms, sequenceid=42, compaction requested=false 2024-12-03T04:58:30,750 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7b4f7fa158df258d504886f3e9969c39: 2024-12-03T04:58:30,750 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, 
sizeToCheck=16.0 K 2024-12-03T04:58:30,750 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T04:58:30,751 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/701d931e88634fe789931921e3892972 because midkey is the same as first or last row 2024-12-03T04:58:31,326 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T04:58:36,979 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 7b4f7fa158df258d504886f3e9969c39, had cached 0 bytes from a total of 40219 2024-12-03T04:58:38,733 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T04:58:38,734 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T04:58:38,734 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T04:58:38,739 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T04:58:38,740 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T04:58:38,740 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T04:58:38,740 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T04:58:38,740 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1053966274, stopped=false 2024-12-03T04:58:38,741 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=32beccd76b35,34303,1733201822340 2024-12-03T04:58:38,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43303-0x1015173f3870001, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T04:58:38,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T04:58:38,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43303-0x1015173f3870001, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:38,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:38,743 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T04:58:38,744 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T04:58:38,744 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
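
[Editor's note] The connection-close entries above log the full call stack of whoever closed the connection, which makes teardown-ordering problems easy to trace in a shared minicluster. A plain-Java sketch of the same pattern follows; the class is illustrative and not the HBase AsyncConnectionImpl.

// Illustrative: log the stack of whoever closes a shared resource, so test
// teardown ordering problems are easy to trace. Not the HBase implementation.
public class TraceableConnection implements AutoCloseable {

    private final String owner;

    TraceableConnection(String owner) {
        this.owner = owner;
    }

    @Override
    public void close() {
        StringBuilder stack = new StringBuilder();
        for (StackTraceElement frame : Thread.currentThread().getStackTrace()) {
            stack.append("  at ").append(frame).append(System.lineSeparator());
        }
        System.out.println("Connection has been closed by " + owner
            + ". Call stack:" + System.lineSeparator() + stack);
    }

    public static void main(String[] args) {
        try (TraceableConnection conn = new TraceableConnection("Time-limited test")) {
            // use the connection; try-with-resources closes it and logs the stack
        }
    }
}
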
2024-12-03T04:58:38,744 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43303-0x1015173f3870001, quorum=127.0.0.1:63841, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T04:58:38,744 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at 
java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T04:58:38,744 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T04:58:38,745 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '32beccd76b35,43303,1733201823270' ***** 2024-12-03T04:58:38,745 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T04:58:38,745 INFO [RS:0;32beccd76b35:43303 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T04:58:38,746 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T04:58:38,746 INFO [RS:0;32beccd76b35:43303 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T04:58:38,746 INFO [RS:0;32beccd76b35:43303 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T04:58:38,746 INFO [RS:0;32beccd76b35:43303 {}] regionserver.HRegionServer(3091): Received CLOSE for 7b4f7fa158df258d504886f3e9969c39 2024-12-03T04:58:38,755 INFO [RS:0;32beccd76b35:43303 {}] regionserver.HRegionServer(959): stopping server 32beccd76b35,43303,1733201823270 2024-12-03T04:58:38,755 INFO [RS:0;32beccd76b35:43303 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T04:58:38,755 INFO [RS:0;32beccd76b35:43303 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;32beccd76b35:43303. 2024-12-03T04:58:38,755 DEBUG [RS:0;32beccd76b35:43303 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T04:58:38,755 DEBUG [RS:0;32beccd76b35:43303 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T04:58:38,755 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 7b4f7fa158df258d504886f3e9969c39, disabling compactions & flushes 2024-12-03T04:58:38,756 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39. 
2024-12-03T04:58:38,756 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39. 2024-12-03T04:58:38,756 INFO [RS:0;32beccd76b35:43303 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T04:58:38,756 INFO [RS:0;32beccd76b35:43303 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T04:58:38,756 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39. after waiting 0 ms 2024-12-03T04:58:38,756 INFO [RS:0;32beccd76b35:43303 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T04:58:38,756 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39. 2024-12-03T04:58:38,756 INFO [RS:0;32beccd76b35:43303 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T04:58:38,756 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 7b4f7fa158df258d504886f3e9969c39 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-03T04:58:38,757 INFO [RS:0;32beccd76b35:43303 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-03T04:58:38,757 DEBUG [RS:0;32beccd76b35:43303 {}] regionserver.HRegionServer(1325): Online Regions={7b4f7fa158df258d504886f3e9969c39=TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39., 1588230740=hbase:meta,,1.1588230740} 2024-12-03T04:58:38,758 DEBUG [RS:0;32beccd76b35:43303 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 7b4f7fa158df258d504886f3e9969c39 2024-12-03T04:58:38,758 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T04:58:38,758 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T04:58:38,758 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T04:58:38,758 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T04:58:38,759 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T04:58:38,759 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-12-03T04:58:38,764 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/.tmp/info/321c9038d5744b588424b3d0c799fba6 is 1080, key is row0029/info:/1733201912714/Put/seqid=0 2024-12-03T04:58:38,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741850_1026 (size=8193) 2024-12-03T04:58:38,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741850_1026 (size=8193) 2024-12-03T04:58:38,778 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/.tmp/info/321c9038d5744b588424b3d0c799fba6 2024-12-03T04:58:38,791 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/hbase/meta/1588230740/.tmp/info/84a2101bdfdb4e288b5d7489317859b7 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39./info:regioninfo/1733201827012/Put/seqid=0 2024-12-03T04:58:38,794 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/.tmp/info/321c9038d5744b588424b3d0c799fba6 as hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/321c9038d5744b588424b3d0c799fba6 2024-12-03T04:58:38,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741851_1027 (size=7016) 2024-12-03T04:58:38,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741851_1027 (size=7016) 2024-12-03T04:58:38,801 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/hbase/meta/1588230740/.tmp/info/84a2101bdfdb4e288b5d7489317859b7 2024-12-03T04:58:38,823 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/321c9038d5744b588424b3d0c799fba6, entries=3, sequenceid=48, filesize=8.0 K 2024-12-03T04:58:38,826 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 7b4f7fa158df258d504886f3e9969c39 in 70ms, sequenceid=48, compaction requested=true 2024-12-03T04:58:38,827 DEBUG 
[StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/b0815949d8f7411782cf0a61b569ec8d, hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/bc4415cdf3ff40ef8111757ecf660f47, hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/a5b028da81444512bea2228ad8e0619a] to archive 2024-12-03T04:58:38,830 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-03T04:58:38,832 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/b0815949d8f7411782cf0a61b569ec8d to hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/archive/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/b0815949d8f7411782cf0a61b569ec8d 2024-12-03T04:58:38,835 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/bc4415cdf3ff40ef8111757ecf660f47 to hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/archive/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/bc4415cdf3ff40ef8111757ecf660f47 2024-12-03T04:58:38,838 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/a5b028da81444512bea2228ad8e0619a to hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/archive/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/info/a5b028da81444512bea2228ad8e0619a 2024-12-03T04:58:38,842 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/hbase/meta/1588230740/.tmp/ns/b9c69b04405d4cff8f61fc94000d754e is 43, key is default/ns:d/1733201825765/Put/seqid=0 2024-12-03T04:58:38,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741852_1028 (size=5153) 2024-12-03T04:58:38,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741852_1028 (size=5153) 
2024-12-03T04:58:38,857 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/hbase/meta/1588230740/.tmp/ns/b9c69b04405d4cff8f61fc94000d754e 2024-12-03T04:58:38,855 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=32beccd76b35:34303 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] 
at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-12-03T04:58:38,860 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [b0815949d8f7411782cf0a61b569ec8d=12509, bc4415cdf3ff40ef8111757ecf660f47=12509, a5b028da81444512bea2228ad8e0619a=12509] 2024-12-03T04:58:38,895 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/default/TestLogRolling-testSlowSyncLogRolling/7b4f7fa158df258d504886f3e9969c39/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-12-03T04:58:38,898 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39. 2024-12-03T04:58:38,898 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 7b4f7fa158df258d504886f3e9969c39: Waiting for close lock at 1733201918755Running coprocessor pre-close hooks at 1733201918755Disabling compacts and flushes for region at 1733201918755Disabling writes for close at 1733201918756 (+1 ms)Obtaining lock to block concurrent updates at 1733201918756Preparing flush snapshotting stores in 7b4f7fa158df258d504886f3e9969c39 at 1733201918756Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1733201918757 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39. 
at 1733201918758 (+1 ms)Flushing 7b4f7fa158df258d504886f3e9969c39/info: creating writer at 1733201918758Flushing 7b4f7fa158df258d504886f3e9969c39/info: appending metadata at 1733201918763 (+5 ms)Flushing 7b4f7fa158df258d504886f3e9969c39/info: closing flushed file at 1733201918763Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24cd356: reopening flushed file at 1733201918793 (+30 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 7b4f7fa158df258d504886f3e9969c39 in 70ms, sequenceid=48, compaction requested=true at 1733201918826 (+33 ms)Writing region close event to WAL at 1733201918865 (+39 ms)Running coprocessor post-close hooks at 1733201918896 (+31 ms)Closed at 1733201918898 (+2 ms) 2024-12-03T04:58:38,899 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733201826054.7b4f7fa158df258d504886f3e9969c39. 2024-12-03T04:58:38,908 INFO [regionserver/32beccd76b35:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T04:58:38,911 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/hbase/meta/1588230740/.tmp/table/74cf4ed6ed0e47d1a8b6113fcbdd2d70 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733201827031/Put/seqid=0 2024-12-03T04:58:38,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741853_1029 (size=5396) 2024-12-03T04:58:38,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741853_1029 (size=5396) 2024-12-03T04:58:38,926 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/hbase/meta/1588230740/.tmp/table/74cf4ed6ed0e47d1a8b6113fcbdd2d70 2024-12-03T04:58:38,948 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/hbase/meta/1588230740/.tmp/info/84a2101bdfdb4e288b5d7489317859b7 as hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/hbase/meta/1588230740/info/84a2101bdfdb4e288b5d7489317859b7 2024-12-03T04:58:38,956 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/hbase/meta/1588230740/info/84a2101bdfdb4e288b5d7489317859b7, entries=10, sequenceid=11, filesize=6.9 K 2024-12-03T04:58:38,958 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/hbase/meta/1588230740/.tmp/ns/b9c69b04405d4cff8f61fc94000d754e as hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/hbase/meta/1588230740/ns/b9c69b04405d4cff8f61fc94000d754e 2024-12-03T04:58:38,958 DEBUG [RS:0;32beccd76b35:43303 {}] 
regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-03T04:58:38,966 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/hbase/meta/1588230740/ns/b9c69b04405d4cff8f61fc94000d754e, entries=2, sequenceid=11, filesize=5.0 K 2024-12-03T04:58:38,967 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/hbase/meta/1588230740/.tmp/table/74cf4ed6ed0e47d1a8b6113fcbdd2d70 as hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/hbase/meta/1588230740/table/74cf4ed6ed0e47d1a8b6113fcbdd2d70 2024-12-03T04:58:38,975 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/hbase/meta/1588230740/table/74cf4ed6ed0e47d1a8b6113fcbdd2d70, entries=2, sequenceid=11, filesize=5.3 K 2024-12-03T04:58:38,976 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 217ms, sequenceid=11, compaction requested=false 2024-12-03T04:58:38,984 INFO [regionserver/32beccd76b35:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-03T04:58:38,984 INFO [regionserver/32beccd76b35:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-03T04:58:38,986 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-03T04:58:38,987 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T04:58:38,988 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T04:58:38,988 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733201918758Running coprocessor pre-close hooks at 1733201918758Disabling compacts and flushes for region at 1733201918758Disabling writes for close at 1733201918759 (+1 ms)Obtaining lock to block concurrent updates at 1733201918759Preparing flush snapshotting stores in 1588230740 at 1733201918759Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1733201918759Flushing stores of hbase:meta,,1.1588230740 at 1733201918760 (+1 ms)Flushing 1588230740/info: creating writer at 1733201918761 (+1 ms)Flushing 1588230740/info: appending metadata at 1733201918791 (+30 ms)Flushing 1588230740/info: closing flushed file at 1733201918791Flushing 1588230740/ns: creating writer at 1733201918815 (+24 ms)Flushing 1588230740/ns: appending metadata at 1733201918841 (+26 ms)Flushing 1588230740/ns: closing 
flushed file at 1733201918841Flushing 1588230740/table: creating writer at 1733201918872 (+31 ms)Flushing 1588230740/table: appending metadata at 1733201918909 (+37 ms)Flushing 1588230740/table: closing flushed file at 1733201918909Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56cc1542: reopening flushed file at 1733201918947 (+38 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1c8d6a92: reopening flushed file at 1733201918957 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5ef366ea: reopening flushed file at 1733201918966 (+9 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 217ms, sequenceid=11, compaction requested=false at 1733201918977 (+11 ms)Writing region close event to WAL at 1733201918981 (+4 ms)Running coprocessor post-close hooks at 1733201918987 (+6 ms)Closed at 1733201918988 (+1 ms) 2024-12-03T04:58:38,988 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T04:58:39,158 INFO [RS:0;32beccd76b35:43303 {}] regionserver.HRegionServer(976): stopping server 32beccd76b35,43303,1733201823270; all regions closed. 2024-12-03T04:58:39,160 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:39,160 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:39,161 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:39,161 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:39,161 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:39,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741834_1010 (size=3066) 2024-12-03T04:58:39,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741834_1010 (size=3066) 2024-12-03T04:58:39,569 DEBUG [RS:0;32beccd76b35:43303 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/oldWALs 2024-12-03T04:58:39,569 INFO [RS:0;32beccd76b35:43303 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 32beccd76b35%2C43303%2C1733201823270.meta:.meta(num 1733201825538) 2024-12-03T04:58:39,573 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:39,573 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:39,574 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:39,574 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:39,574 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:39,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741847_1023 (size=12695) 2024-12-03T04:58:39,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741847_1023 (size=12695) 2024-12-03T04:58:39,580 DEBUG [RS:0;32beccd76b35:43303 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/oldWALs 2024-12-03T04:58:39,580 INFO [RS:0;32beccd76b35:43303 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 32beccd76b35%2C43303%2C1733201823270:(num 1733201898679) 2024-12-03T04:58:39,580 DEBUG [RS:0;32beccd76b35:43303 {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T04:58:39,580 INFO [RS:0;32beccd76b35:43303 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T04:58:39,580 INFO [RS:0;32beccd76b35:43303 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T04:58:39,581 INFO [RS:0;32beccd76b35:43303 {}] hbase.ChoreService(370): Chore service for: regionserver/32beccd76b35:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-03T04:58:39,581 INFO [RS:0;32beccd76b35:43303 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T04:58:39,581 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T04:58:39,581 INFO [RS:0;32beccd76b35:43303 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:43303 2024-12-03T04:58:39,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T04:58:39,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43303-0x1015173f3870001, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/32beccd76b35,43303,1733201823270 2024-12-03T04:58:39,585 INFO [RS:0;32beccd76b35:43303 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T04:58:39,586 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [32beccd76b35,43303,1733201823270] 2024-12-03T04:58:39,588 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/32beccd76b35,43303,1733201823270 already deleted, retry=false 2024-12-03T04:58:39,588 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 32beccd76b35,43303,1733201823270 expired; onlineServers=0 2024-12-03T04:58:39,588 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '32beccd76b35,34303,1733201822340' ***** 2024-12-03T04:58:39,588 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T04:58:39,589 INFO [M:0;32beccd76b35:34303 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T04:58:39,589 INFO [M:0;32beccd76b35:34303 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T04:58:39,589 DEBUG [M:0;32beccd76b35:34303 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T04:58:39,589 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-03T04:58:39,589 DEBUG [M:0;32beccd76b35:34303 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T04:58:39,589 DEBUG [master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.small.0-1733201824706 {}] cleaner.HFileCleaner(306): Exit Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.small.0-1733201824706,5,FailOnTimeoutGroup] 2024-12-03T04:58:39,589 DEBUG [master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.large.0-1733201824704 {}] cleaner.HFileCleaner(306): Exit Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.large.0-1733201824704,5,FailOnTimeoutGroup] 2024-12-03T04:58:39,589 INFO [M:0;32beccd76b35:34303 {}] hbase.ChoreService(370): Chore service for: master/32beccd76b35:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T04:58:39,589 INFO [M:0;32beccd76b35:34303 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T04:58:39,590 DEBUG [M:0;32beccd76b35:34303 {}] master.HMaster(1795): Stopping service threads 2024-12-03T04:58:39,590 INFO [M:0;32beccd76b35:34303 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T04:58:39,590 INFO [M:0;32beccd76b35:34303 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T04:58:39,590 INFO [M:0;32beccd76b35:34303 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T04:58:39,590 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T04:58:39,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T04:58:39,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:39,591 DEBUG [M:0;32beccd76b35:34303 {}] zookeeper.ZKUtil(347): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-03T04:58:39,591 WARN [M:0;32beccd76b35:34303 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-03T04:58:39,592 INFO [M:0;32beccd76b35:34303 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/.lastflushedseqids 2024-12-03T04:58:39,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741854_1030 (size=130) 2024-12-03T04:58:39,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741854_1030 (size=130) 2024-12-03T04:58:39,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43303-0x1015173f3870001, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T04:58:39,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43303-0x1015173f3870001, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T04:58:39,689 
INFO [RS:0;32beccd76b35:43303 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T04:58:39,689 INFO [RS:0;32beccd76b35:43303 {}] regionserver.HRegionServer(1031): Exiting; stopping=32beccd76b35,43303,1733201823270; zookeeper connection closed. 2024-12-03T04:58:39,689 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6acde027 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6acde027 2024-12-03T04:58:39,690 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-03T04:58:40,007 INFO [M:0;32beccd76b35:34303 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-03T04:58:40,007 INFO [M:0;32beccd76b35:34303 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T04:58:40,007 DEBUG [M:0;32beccd76b35:34303 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T04:58:40,007 INFO [M:0;32beccd76b35:34303 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T04:58:40,007 DEBUG [M:0;32beccd76b35:34303 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T04:58:40,007 DEBUG [M:0;32beccd76b35:34303 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T04:58:40,007 DEBUG [M:0;32beccd76b35:34303 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T04:58:40,008 INFO [M:0;32beccd76b35:34303 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-12-03T04:58:40,039 DEBUG [M:0;32beccd76b35:34303 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c2f4e24db7f14f83a417830833b402f7 is 82, key is hbase:meta,,1/info:regioninfo/1733201825659/Put/seqid=0 2024-12-03T04:58:40,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741855_1031 (size=5672) 2024-12-03T04:58:40,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741855_1031 (size=5672) 2024-12-03T04:58:40,050 INFO [M:0;32beccd76b35:34303 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c2f4e24db7f14f83a417830833b402f7 2024-12-03T04:58:40,079 DEBUG [M:0;32beccd76b35:34303 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/26a144980b224250b8a97f7f1aea7f3e is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733201827040/Put/seqid=0 2024-12-03T04:58:40,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741856_1032 (size=6247) 2024-12-03T04:58:40,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741856_1032 (size=6247) 2024-12-03T04:58:40,093 INFO [M:0;32beccd76b35:34303 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/26a144980b224250b8a97f7f1aea7f3e 2024-12-03T04:58:40,100 INFO [M:0;32beccd76b35:34303 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 26a144980b224250b8a97f7f1aea7f3e 2024-12-03T04:58:40,123 DEBUG [M:0;32beccd76b35:34303 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dd9ae7645fda4d5db4f11fe5581cadd0 is 69, key is 32beccd76b35,43303,1733201823270/rs:state/1733201824769/Put/seqid=0 2024-12-03T04:58:40,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741857_1033 (size=5156) 2024-12-03T04:58:40,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741857_1033 (size=5156) 2024-12-03T04:58:40,132 INFO [M:0;32beccd76b35:34303 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), 
to=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dd9ae7645fda4d5db4f11fe5581cadd0 2024-12-03T04:58:40,158 DEBUG [M:0;32beccd76b35:34303 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a1e3a89db72b44b684afe8b2c1e7ee37 is 52, key is load_balancer_on/state:d/1733201826020/Put/seqid=0 2024-12-03T04:58:40,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741858_1034 (size=5056) 2024-12-03T04:58:40,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741858_1034 (size=5056) 2024-12-03T04:58:40,173 INFO [M:0;32beccd76b35:34303 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a1e3a89db72b44b684afe8b2c1e7ee37 2024-12-03T04:58:40,181 DEBUG [M:0;32beccd76b35:34303 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c2f4e24db7f14f83a417830833b402f7 as hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c2f4e24db7f14f83a417830833b402f7 2024-12-03T04:58:40,190 INFO [M:0;32beccd76b35:34303 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c2f4e24db7f14f83a417830833b402f7, entries=8, sequenceid=59, filesize=5.5 K 2024-12-03T04:58:40,191 DEBUG [M:0;32beccd76b35:34303 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/26a144980b224250b8a97f7f1aea7f3e as hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/26a144980b224250b8a97f7f1aea7f3e 2024-12-03T04:58:40,199 INFO [M:0;32beccd76b35:34303 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 26a144980b224250b8a97f7f1aea7f3e 2024-12-03T04:58:40,199 INFO [M:0;32beccd76b35:34303 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/26a144980b224250b8a97f7f1aea7f3e, entries=6, sequenceid=59, filesize=6.1 K 2024-12-03T04:58:40,201 DEBUG [M:0;32beccd76b35:34303 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dd9ae7645fda4d5db4f11fe5581cadd0 as hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dd9ae7645fda4d5db4f11fe5581cadd0 
2024-12-03T04:58:40,208 INFO [M:0;32beccd76b35:34303 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dd9ae7645fda4d5db4f11fe5581cadd0, entries=1, sequenceid=59, filesize=5.0 K 2024-12-03T04:58:40,210 DEBUG [M:0;32beccd76b35:34303 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a1e3a89db72b44b684afe8b2c1e7ee37 as hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a1e3a89db72b44b684afe8b2c1e7ee37 2024-12-03T04:58:40,217 INFO [M:0;32beccd76b35:34303 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a1e3a89db72b44b684afe8b2c1e7ee37, entries=1, sequenceid=59, filesize=4.9 K 2024-12-03T04:58:40,219 INFO [M:0;32beccd76b35:34303 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 212ms, sequenceid=59, compaction requested=false 2024-12-03T04:58:40,221 INFO [M:0;32beccd76b35:34303 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T04:58:40,222 DEBUG [M:0;32beccd76b35:34303 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733201920007Disabling compacts and flushes for region at 1733201920007Disabling writes for close at 1733201920007Obtaining lock to block concurrent updates at 1733201920008 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733201920008Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1733201920009 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733201920010 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733201920010Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733201920039 (+29 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733201920039Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733201920059 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733201920078 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733201920078Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733201920100 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733201920122 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733201920122Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733201920140 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733201920157 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733201920157Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@78771d8e: reopening flushed file at 1733201920180 (+23 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5b4e35d7: reopening flushed file at 1733201920190 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b11f458: reopening flushed file at 1733201920199 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16f92a04: reopening flushed file at 1733201920208 (+9 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 212ms, sequenceid=59, compaction requested=false at 1733201920219 (+11 ms)Writing region close event to WAL at 1733201920221 (+2 ms)Closed at 1733201920221 2024-12-03T04:58:40,223 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:40,223 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:40,223 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:40,223 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:40,223 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:40,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36129 is added to blk_1073741830_1006 (size=27973) 2024-12-03T04:58:40,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41791 is added to blk_1073741830_1006 (size=27973) 2024-12-03T04:58:40,227 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T04:58:40,227 INFO [M:0;32beccd76b35:34303 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-03T04:58:40,227 INFO [M:0;32beccd76b35:34303 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34303 2024-12-03T04:58:40,228 INFO [M:0;32beccd76b35:34303 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T04:58:40,330 INFO [M:0;32beccd76b35:34303 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T04:58:40,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T04:58:40,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34303-0x1015173f3870000, quorum=127.0.0.1:63841, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T04:58:40,339 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4ead356f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:58:40,342 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a4bcb2c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T04:58:40,342 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T04:58:40,342 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f51668d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T04:58:40,342 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1fad11b5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/hadoop.log.dir/,STOPPED} 2024-12-03T04:58:40,346 WARN [BP-1722606024-172.17.0.3-1733201818878 heartbeating to localhost/127.0.0.1:44433 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T04:58:40,346 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T04:58:40,346 WARN [BP-1722606024-172.17.0.3-1733201818878 heartbeating to localhost/127.0.0.1:44433 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1722606024-172.17.0.3-1733201818878 (Datanode Uuid 6e13a2ba-b388-42c9-a4b5-639a45d02156) service to localhost/127.0.0.1:44433 2024-12-03T04:58:40,346 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T04:58:40,347 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/cluster_2e3f5107-a8de-7682-7467-822af76e42a8/data/data3/current/BP-1722606024-172.17.0.3-1733201818878 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:58:40,348 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/cluster_2e3f5107-a8de-7682-7467-822af76e42a8/data/data4/current/BP-1722606024-172.17.0.3-1733201818878 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:58:40,348 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T04:58:40,352 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1224a347{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:58:40,352 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@157c43ed{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T04:58:40,352 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T04:58:40,352 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21c149f1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T04:58:40,352 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1557a369{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/hadoop.log.dir/,STOPPED} 2024-12-03T04:58:40,355 WARN [BP-1722606024-172.17.0.3-1733201818878 heartbeating to localhost/127.0.0.1:44433 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T04:58:40,355 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T04:58:40,355 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T04:58:40,355 WARN [BP-1722606024-172.17.0.3-1733201818878 heartbeating to localhost/127.0.0.1:44433 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1722606024-172.17.0.3-1733201818878 (Datanode Uuid 669ad6ee-683d-4059-8bd9-b3b0b4724274) service to localhost/127.0.0.1:44433 2024-12-03T04:58:40,356 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/cluster_2e3f5107-a8de-7682-7467-822af76e42a8/data/data1/current/BP-1722606024-172.17.0.3-1733201818878 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:58:40,356 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/cluster_2e3f5107-a8de-7682-7467-822af76e42a8/data/data2/current/BP-1722606024-172.17.0.3-1733201818878 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:58:40,356 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T04:58:40,368 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5f961078{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T04:58:40,369 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@25dfddc5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T04:58:40,369 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T04:58:40,369 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@455f3457{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T04:58:40,369 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75bdea07{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/hadoop.log.dir/,STOPPED} 2024-12-03T04:58:40,383 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-03T04:58:40,424 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-03T04:58:40,437 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44433 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44433 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1647223992) connection to localhost/127.0.0.1:44433 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/32beccd76b35:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:44433 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/32beccd76b35:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: regionserver/32beccd76b35:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44433 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (1647223992) connection to localhost/127.0.0.1:44433 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@7b8013cb java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (1647223992) connection to localhost/127.0.0.1:44433 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44433 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=405 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=367 (was 545), ProcessCount=11 (was 11), AvailableMemoryMB=5002 (was 4922) - AvailableMemoryMB LEAK? - 2024-12-03T04:58:40,445 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=405, MaxFileDescriptor=1048576, SystemLoadAverage=367, ProcessCount=12, AvailableMemoryMB=5000 2024-12-03T04:58:40,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T04:58:40,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/hadoop.log.dir so I do NOT create it in target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7 2024-12-03T04:58:40,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/78861b3e-5212-f9f7-7932-ea603150bad4/hadoop.tmp.dir so I do NOT create it in target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7 2024-12-03T04:58:40,446 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/cluster_e37b92ff-78c9-ecad-43ef-53347177872c, deleteOnExit=true 2024-12-03T04:58:40,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-03T04:58:40,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/test.cache.data in system properties and HBase conf 2024-12-03T04:58:40,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T04:58:40,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/hadoop.log.dir in system properties and HBase conf 2024-12-03T04:58:40,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T04:58:40,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T04:58:40,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-03T04:58:40,446 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-03T04:58:40,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T04:58:40,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T04:58:40,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T04:58:40,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T04:58:40,447 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T04:58:40,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T04:58:40,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T04:58:40,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T04:58:40,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T04:58:40,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/nfs.dump.dir in system properties and HBase conf 2024-12-03T04:58:40,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/java.io.tmpdir in system properties and HBase conf 2024-12-03T04:58:40,448 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T04:58:40,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T04:58:40,449 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T04:58:40,467 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T04:58:40,559 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T04:58:40,565 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T04:58:40,566 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T04:58:40,566 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T04:58:40,566 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T04:58:40,567 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T04:58:40,568 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2db53ea5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/hadoop.log.dir/,AVAILABLE} 2024-12-03T04:58:40,569 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@715839fc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T04:58:40,700 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3715268d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/java.io.tmpdir/jetty-localhost-40341-hadoop-hdfs-3_4_1-tests_jar-_-any-7845597555175030558/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T04:58:40,701 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5f9bf265{HTTP/1.1, (http/1.1)}{localhost:40341} 2024-12-03T04:58:40,701 INFO [Time-limited test {}] server.Server(415): Started @104863ms 2024-12-03T04:58:40,723 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T04:58:40,809 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T04:58:40,813 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T04:58:40,814 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T04:58:40,814 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T04:58:40,814 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T04:58:40,815 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ee6bbfa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/hadoop.log.dir/,AVAILABLE} 2024-12-03T04:58:40,815 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78694030{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T04:58:40,943 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4227a026{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/java.io.tmpdir/jetty-localhost-45635-hadoop-hdfs-3_4_1-tests_jar-_-any-2254521981505200914/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:58:40,943 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5c18786c{HTTP/1.1, (http/1.1)}{localhost:45635} 2024-12-03T04:58:40,943 INFO [Time-limited test {}] server.Server(415): Started @105106ms 2024-12-03T04:58:40,945 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T04:58:40,998 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T04:58:41,002 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T04:58:41,003 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T04:58:41,003 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T04:58:41,003 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T04:58:41,003 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e48ac18{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/hadoop.log.dir/,AVAILABLE} 2024-12-03T04:58:41,004 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19a8d3ee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T04:58:41,057 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/cluster_e37b92ff-78c9-ecad-43ef-53347177872c/data/data1/current/BP-7222034-172.17.0.3-1733201920489/current, will proceed with Du for space computation calculation, 2024-12-03T04:58:41,057 WARN [Thread-439 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/cluster_e37b92ff-78c9-ecad-43ef-53347177872c/data/data2/current/BP-7222034-172.17.0.3-1733201920489/current, will proceed with Du for space computation calculation, 2024-12-03T04:58:41,090 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T04:58:41,094 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x54002118b5abb8e2 with lease ID 0x6280a76a2f1bf463: Processing first storage report for DS-98986dc2-6480-4f26-ba50-55ed376f3d41 from datanode DatanodeRegistration(127.0.0.1:33875, datanodeUuid=0491666b-7ba0-4ec6-adbe-4a6a9f0bb4d9, infoPort=44905, infoSecurePort=0, ipcPort=34829, storageInfo=lv=-57;cid=testClusterID;nsid=603317353;c=1733201920489) 2024-12-03T04:58:41,095 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x54002118b5abb8e2 with lease ID 0x6280a76a2f1bf463: from storage DS-98986dc2-6480-4f26-ba50-55ed376f3d41 node DatanodeRegistration(127.0.0.1:33875, datanodeUuid=0491666b-7ba0-4ec6-adbe-4a6a9f0bb4d9, infoPort=44905, infoSecurePort=0, ipcPort=34829, storageInfo=lv=-57;cid=testClusterID;nsid=603317353;c=1733201920489), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T04:58:41,095 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x54002118b5abb8e2 with lease ID 0x6280a76a2f1bf463: Processing first storage report for DS-d7bdf455-025c-4961-90c2-b73b030762f9 from datanode DatanodeRegistration(127.0.0.1:33875, datanodeUuid=0491666b-7ba0-4ec6-adbe-4a6a9f0bb4d9, infoPort=44905, infoSecurePort=0, ipcPort=34829, storageInfo=lv=-57;cid=testClusterID;nsid=603317353;c=1733201920489) 2024-12-03T04:58:41,095 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x54002118b5abb8e2 with lease ID 0x6280a76a2f1bf463: from storage DS-d7bdf455-025c-4961-90c2-b73b030762f9 node DatanodeRegistration(127.0.0.1:33875, datanodeUuid=0491666b-7ba0-4ec6-adbe-4a6a9f0bb4d9, infoPort=44905, infoSecurePort=0, ipcPort=34829, storageInfo=lv=-57;cid=testClusterID;nsid=603317353;c=1733201920489), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T04:58:41,139 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7d7dc570{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/java.io.tmpdir/jetty-localhost-43829-hadoop-hdfs-3_4_1-tests_jar-_-any-2677254825554957172/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:58:41,140 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@f926f6b{HTTP/1.1, (http/1.1)}{localhost:43829} 2024-12-03T04:58:41,140 INFO [Time-limited test {}] server.Server(415): Started @105302ms 2024-12-03T04:58:41,142 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-03T04:58:41,239 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/cluster_e37b92ff-78c9-ecad-43ef-53347177872c/data/data3/current/BP-7222034-172.17.0.3-1733201920489/current, will proceed with Du for space computation calculation, 2024-12-03T04:58:41,239 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/cluster_e37b92ff-78c9-ecad-43ef-53347177872c/data/data4/current/BP-7222034-172.17.0.3-1733201920489/current, will proceed with Du for space computation calculation, 2024-12-03T04:58:41,264 WARN [Thread-453 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T04:58:41,267 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x91604d75a60fc47c with lease ID 0x6280a76a2f1bf464: Processing first storage report for DS-a221d8fe-2ca5-423c-80b4-bdc6f1aa53b4 from datanode DatanodeRegistration(127.0.0.1:37485, datanodeUuid=5590a3aa-ed19-46f1-a63c-86507005d77b, infoPort=37373, infoSecurePort=0, ipcPort=39173, storageInfo=lv=-57;cid=testClusterID;nsid=603317353;c=1733201920489) 2024-12-03T04:58:41,267 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x91604d75a60fc47c with lease ID 0x6280a76a2f1bf464: from storage DS-a221d8fe-2ca5-423c-80b4-bdc6f1aa53b4 node DatanodeRegistration(127.0.0.1:37485, datanodeUuid=5590a3aa-ed19-46f1-a63c-86507005d77b, infoPort=37373, infoSecurePort=0, ipcPort=39173, storageInfo=lv=-57;cid=testClusterID;nsid=603317353;c=1733201920489), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-03T04:58:41,267 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x91604d75a60fc47c with lease ID 0x6280a76a2f1bf464: Processing first storage report for DS-b6ebe639-8371-4793-a392-41f7162c9456 from datanode DatanodeRegistration(127.0.0.1:37485, datanodeUuid=5590a3aa-ed19-46f1-a63c-86507005d77b, infoPort=37373, infoSecurePort=0, ipcPort=39173, storageInfo=lv=-57;cid=testClusterID;nsid=603317353;c=1733201920489) 2024-12-03T04:58:41,267 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x91604d75a60fc47c with lease ID 0x6280a76a2f1bf464: from storage DS-b6ebe639-8371-4793-a392-41f7162c9456 node DatanodeRegistration(127.0.0.1:37485, datanodeUuid=5590a3aa-ed19-46f1-a63c-86507005d77b, infoPort=37373, infoSecurePort=0, ipcPort=39173, storageInfo=lv=-57;cid=testClusterID;nsid=603317353;c=1733201920489), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T04:58:41,292 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7 2024-12-03T04:58:41,302 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/cluster_e37b92ff-78c9-ecad-43ef-53347177872c/zookeeper_0, clientPort=62804, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/cluster_e37b92ff-78c9-ecad-43ef-53347177872c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/cluster_e37b92ff-78c9-ecad-43ef-53347177872c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T04:58:41,306 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62804 2024-12-03T04:58:41,307 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:58:41,308 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:58:41,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33875 is added to blk_1073741825_1001 (size=7) 2024-12-03T04:58:41,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37485 is added to blk_1073741825_1001 (size=7) 2024-12-03T04:58:41,335 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea with version=8 2024-12-03T04:58:41,335 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/hbase-staging 2024-12-03T04:58:41,338 INFO [Time-limited test {}] client.ConnectionUtils(128): master/32beccd76b35:0 server-side Connection retries=45 2024-12-03T04:58:41,338 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T04:58:41,338 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T04:58:41,338 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T04:58:41,338 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T04:58:41,338 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T04:58:41,339 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-03T04:58:41,339 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T04:58:41,340 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:45277 2024-12-03T04:58:41,341 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45277 connecting to ZooKeeper ensemble=127.0.0.1:62804 2024-12-03T04:58:41,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:452770x0, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T04:58:41,349 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45277-0x101517579be0000 connected 2024-12-03T04:58:41,366 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:58:41,368 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:58:41,370 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T04:58:41,371 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea, hbase.cluster.distributed=false 2024-12-03T04:58:41,372 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T04:58:41,373 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45277 2024-12-03T04:58:41,373 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45277 2024-12-03T04:58:41,373 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45277 2024-12-03T04:58:41,374 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45277 2024-12-03T04:58:41,377 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45277 2024-12-03T04:58:41,397 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/32beccd76b35:0 server-side Connection retries=45 2024-12-03T04:58:41,397 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T04:58:41,397 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T04:58:41,397 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T04:58:41,397 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T04:58:41,397 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T04:58:41,398 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T04:58:41,398 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T04:58:41,400 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34955 2024-12-03T04:58:41,401 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34955 connecting to ZooKeeper ensemble=127.0.0.1:62804 2024-12-03T04:58:41,402 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:58:41,405 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:58:41,410 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:349550x0, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T04:58:41,411 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34955-0x101517579be0001 connected 2024-12-03T04:58:41,411 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34955-0x101517579be0001, quorum=127.0.0.1:62804, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T04:58:41,411 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T04:58:41,414 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T04:58:41,415 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34955-0x101517579be0001, quorum=127.0.0.1:62804, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T04:58:41,416 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34955-0x101517579be0001, quorum=127.0.0.1:62804, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T04:58:41,421 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34955 2024-12-03T04:58:41,422 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34955 2024-12-03T04:58:41,423 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34955 2024-12-03T04:58:41,425 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34955 2024-12-03T04:58:41,428 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34955 
2024-12-03T04:58:41,449 DEBUG [M:0;32beccd76b35:45277 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;32beccd76b35:45277 2024-12-03T04:58:41,451 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/32beccd76b35,45277,1733201921337 2024-12-03T04:58:41,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T04:58:41,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34955-0x101517579be0001, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T04:58:41,454 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/32beccd76b35,45277,1733201921337 2024-12-03T04:58:41,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34955-0x101517579be0001, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T04:58:41,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:41,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34955-0x101517579be0001, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:41,457 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T04:58:41,458 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/32beccd76b35,45277,1733201921337 from backup master directory 2024-12-03T04:58:41,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34955-0x101517579be0001, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T04:58:41,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/32beccd76b35,45277,1733201921337 2024-12-03T04:58:41,459 WARN [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-03T04:58:41,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T04:58:41,459 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=32beccd76b35,45277,1733201921337 2024-12-03T04:58:41,467 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/hbase.id] with ID: d6fcfc5c-f64c-4da0-a018-5dfb97a0e8f1 2024-12-03T04:58:41,467 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/.tmp/hbase.id 2024-12-03T04:58:41,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33875 is added to blk_1073741826_1002 (size=42) 2024-12-03T04:58:41,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37485 is added to blk_1073741826_1002 (size=42) 2024-12-03T04:58:41,487 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/.tmp/hbase.id]:[hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/hbase.id] 2024-12-03T04:58:41,509 INFO [master/32beccd76b35:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:58:41,509 INFO [master/32beccd76b35:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-03T04:58:41,512 INFO [master/32beccd76b35:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-12-03T04:58:41,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:41,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34955-0x101517579be0001, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:41,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37485 is added to blk_1073741827_1003 (size=196) 2024-12-03T04:58:41,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33875 is added to blk_1073741827_1003 (size=196) 2024-12-03T04:58:41,536 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T04:58:41,537 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T04:58:41,537 INFO [master/32beccd76b35:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T04:58:41,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37485 is added to blk_1073741828_1004 (size=1189) 2024-12-03T04:58:41,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33875 is added to blk_1073741828_1004 (size=1189) 2024-12-03T04:58:41,548 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store 2024-12-03T04:58:41,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37485 is added to blk_1073741829_1005 (size=34) 2024-12-03T04:58:41,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33875 is added to blk_1073741829_1005 (size=34) 2024-12-03T04:58:41,564 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T04:58:41,564 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T04:58:41,564 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T04:58:41,564 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T04:58:41,564 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T04:58:41,564 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T04:58:41,564 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T04:58:41,565 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733201921564Disabling compacts and flushes for region at 1733201921564Disabling writes for close at 1733201921564Writing region close event to WAL at 1733201921564Closed at 1733201921564 2024-12-03T04:58:41,566 WARN [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store/.initializing 2024-12-03T04:58:41,566 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/WALs/32beccd76b35,45277,1733201921337 2024-12-03T04:58:41,570 INFO [master/32beccd76b35:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32beccd76b35%2C45277%2C1733201921337, suffix=, logDir=hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/WALs/32beccd76b35,45277,1733201921337, archiveDir=hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/oldWALs, maxLogs=10 2024-12-03T04:58:41,570 INFO [master/32beccd76b35:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C45277%2C1733201921337.1733201921570 2024-12-03T04:58:41,577 INFO [master/32beccd76b35:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/WALs/32beccd76b35,45277,1733201921337/32beccd76b35%2C45277%2C1733201921337.1733201921570 2024-12-03T04:58:41,578 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44905:44905),(127.0.0.1/127.0.0.1:37373:37373)] 2024-12-03T04:58:41,579 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T04:58:41,579 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T04:58:41,579 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:58:41,579 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:58:41,581 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:58:41,583 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T04:58:41,583 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:41,584 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:58:41,584 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:58:41,585 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T04:58:41,585 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:41,586 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T04:58:41,586 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:58:41,588 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T04:58:41,588 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:41,589 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T04:58:41,589 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:58:41,590 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T04:58:41,590 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:41,591 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T04:58:41,591 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:58:41,592 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:58:41,592 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:58:41,594 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:58:41,594 DEBUG [master/32beccd76b35:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:58:41,595 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T04:58:41,597 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:58:41,599 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T04:58:41,600 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=878611, jitterRate=0.11721198260784149}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T04:58:41,601 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733201921579Initializing all the Stores at 1733201921580 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201921580Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733201921581 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733201921581Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733201921581Cleaning up temporary data from old regions at 1733201921594 (+13 ms)Region opened successfully at 1733201921601 (+7 ms) 2024-12-03T04:58:41,603 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T04:58:41,608 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48a77067, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=32beccd76b35/172.17.0.3:0 2024-12-03T04:58:41,610 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-03T04:58:41,610 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T04:58:41,610 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T04:58:41,610 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T04:58:41,611 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-03T04:58:41,611 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-03T04:58:41,612 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T04:58:41,619 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T04:58:41,620 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T04:58:41,622 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T04:58:41,623 INFO [master/32beccd76b35:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T04:58:41,624 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T04:58:41,625 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T04:58:41,625 INFO [master/32beccd76b35:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T04:58:41,628 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T04:58:41,629 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T04:58:41,630 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T04:58:41,632 DEBUG 
[master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T04:58:41,634 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T04:58:41,635 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T04:58:41,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34955-0x101517579be0001, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T04:58:41,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T04:58:41,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34955-0x101517579be0001, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:41,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:41,638 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=32beccd76b35,45277,1733201921337, sessionid=0x101517579be0000, setting cluster-up flag (Was=false) 2024-12-03T04:58:41,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34955-0x101517579be0001, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:41,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:41,647 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T04:58:41,648 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=32beccd76b35,45277,1733201921337 2024-12-03T04:58:41,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:41,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34955-0x101517579be0001, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:41,657 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T04:58:41,658 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=32beccd76b35,45277,1733201921337 2024-12-03T04:58:41,659 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T04:58:41,662 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T04:58:41,662 INFO [master/32beccd76b35:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T04:58:41,662 INFO [master/32beccd76b35:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-03T04:58:41,662 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 32beccd76b35,45277,1733201921337 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T04:58:41,664 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T04:58:41,664 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T04:58:41,664 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T04:58:41,664 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T04:58:41,664 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/32beccd76b35:0, corePoolSize=10, maxPoolSize=10 2024-12-03T04:58:41,664 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:41,664 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/32beccd76b35:0, corePoolSize=2, maxPoolSize=2 2024-12-03T04:58:41,664 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/32beccd76b35:0, corePoolSize=1, 
maxPoolSize=1 2024-12-03T04:58:41,665 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733201951665 2024-12-03T04:58:41,665 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T04:58:41,665 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T04:58:41,666 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T04:58:41,666 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T04:58:41,666 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T04:58:41,666 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T04:58:41,666 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:41,666 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T04:58:41,666 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T04:58:41,667 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T04:58:41,667 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T04:58:41,667 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T04:58:41,667 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T04:58:41,667 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T04:58:41,667 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.large.0-1733201921667,5,FailOnTimeoutGroup] 2024-12-03T04:58:41,668 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.small.0-1733201921668,5,FailOnTimeoutGroup] 2024-12-03T04:58:41,668 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:41,668 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T04:58:41,668 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:41,668 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:41,668 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:41,668 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T04:58:41,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33875 is added to blk_1073741831_1007 (size=1321) 2024-12-03T04:58:41,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37485 is added to blk_1073741831_1007 (size=1321) 2024-12-03T04:58:41,677 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T04:58:41,677 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea 2024-12-03T04:58:41,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37485 is added to blk_1073741832_1008 (size=32) 2024-12-03T04:58:41,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33875 is added to blk_1073741832_1008 (size=32) 2024-12-03T04:58:41,731 INFO [RS:0;32beccd76b35:34955 {}] regionserver.HRegionServer(746): ClusterId : d6fcfc5c-f64c-4da0-a018-5dfb97a0e8f1 2024-12-03T04:58:41,731 DEBUG [RS:0;32beccd76b35:34955 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T04:58:41,734 DEBUG [RS:0;32beccd76b35:34955 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T04:58:41,734 DEBUG [RS:0;32beccd76b35:34955 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T04:58:41,736 DEBUG [RS:0;32beccd76b35:34955 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T04:58:41,737 DEBUG [RS:0;32beccd76b35:34955 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f00d9b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=32beccd76b35/172.17.0.3:0 2024-12-03T04:58:41,752 DEBUG [RS:0;32beccd76b35:34955 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;32beccd76b35:34955 2024-12-03T04:58:41,752 INFO [RS:0;32beccd76b35:34955 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T04:58:41,753 INFO [RS:0;32beccd76b35:34955 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T04:58:41,753 DEBUG [RS:0;32beccd76b35:34955 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-03T04:58:41,753 INFO [RS:0;32beccd76b35:34955 {}] regionserver.HRegionServer(2659): reportForDuty to master=32beccd76b35,45277,1733201921337 with port=34955, startcode=1733201921396 2024-12-03T04:58:41,754 DEBUG [RS:0;32beccd76b35:34955 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T04:58:41,757 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33399, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T04:58:41,758 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45277 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 32beccd76b35,34955,1733201921396 2024-12-03T04:58:41,758 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45277 {}] master.ServerManager(517): Registering regionserver=32beccd76b35,34955,1733201921396 2024-12-03T04:58:41,760 DEBUG [RS:0;32beccd76b35:34955 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea 2024-12-03T04:58:41,760 DEBUG [RS:0;32beccd76b35:34955 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41733 2024-12-03T04:58:41,760 DEBUG [RS:0;32beccd76b35:34955 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T04:58:41,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T04:58:41,763 DEBUG [RS:0;32beccd76b35:34955 {}] zookeeper.ZKUtil(111): regionserver:34955-0x101517579be0001, quorum=127.0.0.1:62804, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/32beccd76b35,34955,1733201921396 2024-12-03T04:58:41,763 WARN [RS:0;32beccd76b35:34955 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T04:58:41,763 INFO [RS:0;32beccd76b35:34955 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T04:58:41,763 DEBUG [RS:0;32beccd76b35:34955 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/WALs/32beccd76b35,34955,1733201921396 2024-12-03T04:58:41,763 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [32beccd76b35,34955,1733201921396] 2024-12-03T04:58:41,767 INFO [RS:0;32beccd76b35:34955 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T04:58:41,770 INFO [RS:0;32beccd76b35:34955 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T04:58:41,771 INFO [RS:0;32beccd76b35:34955 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T04:58:41,771 INFO [RS:0;32beccd76b35:34955 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-03T04:58:41,771 INFO [RS:0;32beccd76b35:34955 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T04:58:41,772 INFO [RS:0;32beccd76b35:34955 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T04:58:41,772 INFO [RS:0;32beccd76b35:34955 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:41,772 DEBUG [RS:0;32beccd76b35:34955 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:41,772 DEBUG [RS:0;32beccd76b35:34955 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:41,772 DEBUG [RS:0;32beccd76b35:34955 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:41,772 DEBUG [RS:0;32beccd76b35:34955 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:41,773 DEBUG [RS:0;32beccd76b35:34955 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:41,773 DEBUG [RS:0;32beccd76b35:34955 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/32beccd76b35:0, corePoolSize=2, maxPoolSize=2 2024-12-03T04:58:41,773 DEBUG [RS:0;32beccd76b35:34955 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:41,773 DEBUG [RS:0;32beccd76b35:34955 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:41,773 DEBUG [RS:0;32beccd76b35:34955 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:41,773 DEBUG [RS:0;32beccd76b35:34955 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:41,773 DEBUG [RS:0;32beccd76b35:34955 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:41,773 DEBUG [RS:0;32beccd76b35:34955 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:41,773 DEBUG [RS:0;32beccd76b35:34955 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/32beccd76b35:0, corePoolSize=3, maxPoolSize=3 2024-12-03T04:58:41,773 DEBUG [RS:0;32beccd76b35:34955 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0, corePoolSize=3, maxPoolSize=3 2024-12-03T04:58:41,776 INFO [RS:0;32beccd76b35:34955 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-03T04:58:41,776 INFO [RS:0;32beccd76b35:34955 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:41,776 INFO [RS:0;32beccd76b35:34955 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:41,776 INFO [RS:0;32beccd76b35:34955 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:41,776 INFO [RS:0;32beccd76b35:34955 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:41,776 INFO [RS:0;32beccd76b35:34955 {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,34955,1733201921396-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T04:58:41,794 INFO [RS:0;32beccd76b35:34955 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T04:58:41,795 INFO [RS:0;32beccd76b35:34955 {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,34955,1733201921396-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:41,795 INFO [RS:0;32beccd76b35:34955 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:41,795 INFO [RS:0;32beccd76b35:34955 {}] regionserver.Replication(171): 32beccd76b35,34955,1733201921396 started 2024-12-03T04:58:41,819 INFO [RS:0;32beccd76b35:34955 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:41,819 INFO [RS:0;32beccd76b35:34955 {}] regionserver.HRegionServer(1482): Serving as 32beccd76b35,34955,1733201921396, RpcServer on 32beccd76b35/172.17.0.3:34955, sessionid=0x101517579be0001 2024-12-03T04:58:41,819 DEBUG [RS:0;32beccd76b35:34955 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T04:58:41,819 DEBUG [RS:0;32beccd76b35:34955 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 32beccd76b35,34955,1733201921396 2024-12-03T04:58:41,819 DEBUG [RS:0;32beccd76b35:34955 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '32beccd76b35,34955,1733201921396' 2024-12-03T04:58:41,819 DEBUG [RS:0;32beccd76b35:34955 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T04:58:41,821 DEBUG [RS:0;32beccd76b35:34955 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T04:58:41,822 DEBUG [RS:0;32beccd76b35:34955 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T04:58:41,822 DEBUG [RS:0;32beccd76b35:34955 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T04:58:41,822 DEBUG [RS:0;32beccd76b35:34955 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 32beccd76b35,34955,1733201921396 2024-12-03T04:58:41,822 DEBUG [RS:0;32beccd76b35:34955 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '32beccd76b35,34955,1733201921396' 2024-12-03T04:58:41,822 DEBUG [RS:0;32beccd76b35:34955 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T04:58:41,823 DEBUG 
[RS:0;32beccd76b35:34955 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T04:58:41,823 DEBUG [RS:0;32beccd76b35:34955 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T04:58:41,823 INFO [RS:0;32beccd76b35:34955 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T04:58:41,823 INFO [RS:0;32beccd76b35:34955 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T04:58:41,927 INFO [RS:0;32beccd76b35:34955 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32beccd76b35%2C34955%2C1733201921396, suffix=, logDir=hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/WALs/32beccd76b35,34955,1733201921396, archiveDir=hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/oldWALs, maxLogs=32 2024-12-03T04:58:41,929 INFO [RS:0;32beccd76b35:34955 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C34955%2C1733201921396.1733201921929 2024-12-03T04:58:41,951 INFO [RS:0;32beccd76b35:34955 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/WALs/32beccd76b35,34955,1733201921396/32beccd76b35%2C34955%2C1733201921396.1733201921929 2024-12-03T04:58:41,961 DEBUG [RS:0;32beccd76b35:34955 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44905:44905),(127.0.0.1/127.0.0.1:37373:37373)] 2024-12-03T04:58:42,087 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T04:58:42,089 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T04:58:42,091 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T04:58:42,091 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:42,092 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:58:42,092 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T04:58:42,094 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T04:58:42,094 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:42,095 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:58:42,095 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T04:58:42,097 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T04:58:42,097 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:42,097 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:58:42,098 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T04:58:42,099 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T04:58:42,100 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:42,100 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:58:42,100 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T04:58:42,102 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/data/hbase/meta/1588230740 2024-12-03T04:58:42,102 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/data/hbase/meta/1588230740 2024-12-03T04:58:42,104 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T04:58:42,104 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T04:58:42,105 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-03T04:58:42,107 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T04:58:42,114 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T04:58:42,115 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=715510, jitterRate=-0.09018298983573914}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T04:58:42,117 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733201922087Initializing all the Stores at 1733201922089 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201922089Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201922089Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733201922089Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201922089Cleaning up temporary data from old regions at 1733201922104 (+15 ms)Region opened successfully at 1733201922117 (+13 ms) 2024-12-03T04:58:42,117 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T04:58:42,117 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T04:58:42,117 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T04:58:42,117 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T04:58:42,117 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T04:58:42,118 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T04:58:42,118 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733201922117Disabling compacts and flushes for region at 1733201922117Disabling writes for close at 1733201922117Writing region 
close event to WAL at 1733201922118 (+1 ms)Closed at 1733201922118 2024-12-03T04:58:42,125 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T04:58:42,125 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T04:58:42,125 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T04:58:42,127 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T04:58:42,129 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T04:58:42,280 DEBUG [32beccd76b35:45277 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-03T04:58:42,281 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=32beccd76b35,34955,1733201921396 2024-12-03T04:58:42,283 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 32beccd76b35,34955,1733201921396, state=OPENING 2024-12-03T04:58:42,286 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T04:58:42,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34955-0x101517579be0001, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:42,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:42,290 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T04:58:42,290 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T04:58:42,290 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T04:58:42,290 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=32beccd76b35,34955,1733201921396}] 2024-12-03T04:58:42,445 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T04:58:42,448 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50665, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T04:58:42,453 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T04:58:42,454 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T04:58:42,456 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32beccd76b35%2C34955%2C1733201921396.meta, suffix=.meta, logDir=hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/WALs/32beccd76b35,34955,1733201921396, archiveDir=hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/oldWALs, maxLogs=32 2024-12-03T04:58:42,458 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C34955%2C1733201921396.meta.1733201922458.meta 2024-12-03T04:58:42,464 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/WALs/32beccd76b35,34955,1733201921396/32beccd76b35%2C34955%2C1733201921396.meta.1733201922458.meta 2024-12-03T04:58:42,465 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44905:44905),(127.0.0.1/127.0.0.1:37373:37373)] 2024-12-03T04:58:42,466 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T04:58:42,466 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T04:58:42,467 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T04:58:42,467 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-03T04:58:42,467 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T04:58:42,467 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T04:58:42,467 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T04:58:42,467 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T04:58:42,469 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T04:58:42,470 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T04:58:42,470 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:42,470 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:58:42,471 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T04:58:42,471 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T04:58:42,472 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:42,472 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:58:42,472 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T04:58:42,474 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T04:58:42,474 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:42,474 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:58:42,474 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T04:58:42,475 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T04:58:42,475 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:42,476 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-03T04:58:42,476 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T04:58:42,478 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/data/hbase/meta/1588230740 2024-12-03T04:58:42,483 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/data/hbase/meta/1588230740 2024-12-03T04:58:42,485 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T04:58:42,485 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T04:58:42,485 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T04:58:42,487 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T04:58:42,489 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=884070, jitterRate=0.12415428459644318}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T04:58:42,489 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T04:58:42,490 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733201922467Writing region info on filesystem at 1733201922467Initializing all the Stores at 1733201922468 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201922468Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201922469 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733201922469Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201922469Cleaning up temporary data from old regions at 1733201922485 (+16 ms)Running coprocessor post-open hooks at 1733201922489 (+4 ms)Region opened successfully at 1733201922490 (+1 ms) 2024-12-03T04:58:42,491 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733201922445 2024-12-03T04:58:42,494 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T04:58:42,494 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T04:58:42,496 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=32beccd76b35,34955,1733201921396 2024-12-03T04:58:42,497 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 32beccd76b35,34955,1733201921396, state=OPEN 2024-12-03T04:58:42,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34955-0x101517579be0001, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T04:58:42,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T04:58:42,503 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T04:58:42,503 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T04:58:42,503 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=32beccd76b35,34955,1733201921396 2024-12-03T04:58:42,507 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T04:58:42,507 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=32beccd76b35,34955,1733201921396 in 213 msec 2024-12-03T04:58:42,511 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T04:58:42,511 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 382 msec 2024-12-03T04:58:42,512 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T04:58:42,512 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T04:58:42,515 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T04:58:42,515 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=32beccd76b35,34955,1733201921396, seqNum=-1] 2024-12-03T04:58:42,515 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T04:58:42,517 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54949, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T04:58:42,524 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 862 msec 2024-12-03T04:58:42,525 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733201922525, completionTime=-1 2024-12-03T04:58:42,525 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-03T04:58:42,525 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-03T04:58:42,527 INFO [master/32beccd76b35:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-03T04:58:42,527 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733201982527 2024-12-03T04:58:42,527 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733202042527 2024-12-03T04:58:42,527 INFO [master/32beccd76b35:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-03T04:58:42,527 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,45277,1733201921337-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:42,528 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,45277,1733201921337-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:42,528 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,45277,1733201921337-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:42,528 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-32beccd76b35:45277, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T04:58:42,528 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:42,530 DEBUG [master/32beccd76b35:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T04:58:42,533 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:42,538 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.079sec 2024-12-03T04:58:42,538 INFO [master/32beccd76b35:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T04:58:42,538 INFO [master/32beccd76b35:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T04:58:42,538 INFO [master/32beccd76b35:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T04:58:42,538 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-03T04:58:42,538 INFO [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T04:58:42,538 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,45277,1733201921337-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T04:58:42,538 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,45277,1733201921337-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T04:58:42,542 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T04:58:42,542 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T04:58:42,542 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,45277,1733201921337-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
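The ChoreService lines above show the master registering its periodic maintenance chores (cluster status, balancer, normalizer, catalog janitor, hbck, MOB cleanup) together with their periods. As a rough, hypothetical sketch of how such a chore is built and scheduled, assuming the public ScheduledChore/ChoreService constructors behave as in current HBase; the chore name, period, and body below are made up for illustration and are not taken from this test:

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws Exception {
    // Minimal Stoppable so the chore can observe shutdown.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService choreService = new ChoreService("sketch");
    // Period in milliseconds, comparable to the 60000 ms ClusterStatusChore above.
    ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 60000) {
      @Override protected void chore() {
        // periodic work would go here
      }
    };
    choreService.scheduleChore(chore);
    // ... later, on shutdown:
    choreService.shutdown();
  }
}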
2024-12-03T04:58:42,632 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c59b1da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T04:58:42,632 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 32beccd76b35,45277,-1 for getting cluster id 2024-12-03T04:58:42,632 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T04:58:42,634 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd6fcfc5c-f64c-4da0-a018-5dfb97a0e8f1' 2024-12-03T04:58:42,635 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T04:58:42,635 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d6fcfc5c-f64c-4da0-a018-5dfb97a0e8f1" 2024-12-03T04:58:42,635 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7047af98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T04:58:42,635 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [32beccd76b35,45277,-1] 2024-12-03T04:58:42,636 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T04:58:42,636 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T04:58:42,638 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41480, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T04:58:42,640 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d7283c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T04:58:42,640 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T04:58:42,641 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=32beccd76b35,34955,1733201921396, seqNum=-1] 2024-12-03T04:58:42,642 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T04:58:42,644 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49338, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T04:58:42,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=32beccd76b35,45277,1733201921337 2024-12-03T04:58:42,646 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:58:42,650 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-03T04:58:42,650 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T04:58:42,650 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T04:58:42,650 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T04:58:42,650 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T04:58:42,650 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T04:58:42,650 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T04:58:42,650 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T04:58:42,651 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=398545913, stopped=false 2024-12-03T04:58:42,651 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=32beccd76b35,45277,1733201921337 2024-12-03T04:58:42,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34955-0x101517579be0001, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T04:58:42,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T04:58:42,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34955-0x101517579be0001, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:42,653 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T04:58:42,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:42,653 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
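The call stacks above come from AbstractTestLogRolling.tearDown closing the test's async connection and shutting the minicluster down through HBaseTestingUtil. A minimal sketch of that lifecycle, assuming the StartMiniClusterOption values shown earlier in this log (1 master, 1 region server, 2 datanodes); this is illustrative only and not the actual AbstractTestLogRolling code:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Mirrors the options logged by HBaseTestingUtil(805) in this log.
    testUtil.startMiniCluster(StartMiniClusterOption.builder()
        .numMasters(1).numRegionServers(1).numDataNodes(2).build());
  }

  @Test
  public void testSomething() throws Exception {
    // the test body would exercise the cluster here
  }

  @After
  public void tearDown() throws Exception {
    // Produces the "Shutting down minicluster" ... "Minicluster is down" sequence seen in this log.
    testUtil.shutdownMiniCluster();
  }
}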
2024-12-03T04:58:42,653 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T04:58:42,653 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T04:58:42,654 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '32beccd76b35,34955,1733201921396' ***** 2024-12-03T04:58:42,654 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T04:58:42,654 INFO [RS:0;32beccd76b35:34955 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T04:58:42,654 INFO [RS:0;32beccd76b35:34955 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T04:58:42,654 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T04:58:42,654 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T04:58:42,654 INFO [RS:0;32beccd76b35:34955 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T04:58:42,654 INFO [RS:0;32beccd76b35:34955 {}] regionserver.HRegionServer(959): stopping server 32beccd76b35,34955,1733201921396 2024-12-03T04:58:42,654 INFO [RS:0;32beccd76b35:34955 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T04:58:42,654 INFO [RS:0;32beccd76b35:34955 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;32beccd76b35:34955. 2024-12-03T04:58:42,654 DEBUG [RS:0;32beccd76b35:34955 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T04:58:42,654 DEBUG [RS:0;32beccd76b35:34955 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T04:58:42,655 INFO [RS:0;32beccd76b35:34955 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T04:58:42,655 INFO [RS:0;32beccd76b35:34955 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T04:58:42,655 INFO [RS:0;32beccd76b35:34955 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-03T04:58:42,655 INFO [RS:0;32beccd76b35:34955 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T04:58:42,655 INFO [RS:0;32beccd76b35:34955 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-03T04:58:42,655 DEBUG [RS:0;32beccd76b35:34955 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-03T04:58:42,655 DEBUG [RS:0;32beccd76b35:34955 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-03T04:58:42,655 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T04:58:42,655 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T04:58:42,655 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T04:58:42,655 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T04:58:42,656 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T04:58:42,656 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-03T04:58:42,661 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34955-0x101517579be0001, quorum=127.0.0.1:62804, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T04:58:42,679 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/data/hbase/meta/1588230740/.tmp/ns/f7fc02d24e66458cbe95a9e943ac2696 is 43, key is default/ns:d/1733201922518/Put/seqid=0 2024-12-03T04:58:42,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33875 is added to blk_1073741835_1011 (size=5153) 2024-12-03T04:58:42,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37485 is added to blk_1073741835_1011 (size=5153) 2024-12-03T04:58:42,687 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/data/hbase/meta/1588230740/.tmp/ns/f7fc02d24e66458cbe95a9e943ac2696 2024-12-03T04:58:42,699 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/data/hbase/meta/1588230740/.tmp/ns/f7fc02d24e66458cbe95a9e943ac2696 as hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/data/hbase/meta/1588230740/ns/f7fc02d24e66458cbe95a9e943ac2696 2024-12-03T04:58:42,707 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): 
Added hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/data/hbase/meta/1588230740/ns/f7fc02d24e66458cbe95a9e943ac2696, entries=2, sequenceid=6, filesize=5.0 K 2024-12-03T04:58:42,708 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 52ms, sequenceid=6, compaction requested=false 2024-12-03T04:58:42,709 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-03T04:58:42,716 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T04:58:42,718 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T04:58:42,718 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T04:58:42,718 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733201922655Running coprocessor pre-close hooks at 1733201922655Disabling compacts and flushes for region at 1733201922655Disabling writes for close at 1733201922655Obtaining lock to block concurrent updates at 1733201922656 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733201922656Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733201922656Flushing stores of hbase:meta,,1.1588230740 at 1733201922657 (+1 ms)Flushing 1588230740/ns: creating writer at 1733201922657Flushing 1588230740/ns: appending metadata at 1733201922678 (+21 ms)Flushing 1588230740/ns: closing flushed file at 1733201922678Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14cef2f3: reopening flushed file at 1733201922697 (+19 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 52ms, sequenceid=6, compaction requested=false at 1733201922709 (+12 ms)Writing region close event to WAL at 1733201922711 (+2 ms)Running coprocessor post-close hooks at 1733201922718 (+7 ms)Closed at 1733201922718 2024-12-03T04:58:42,718 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T04:58:42,833 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T04:58:42,833 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-03T04:58:42,834 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 
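The flush above writes the new HFile under the region's .tmp directory and then commits it into the ns family directory (the HRegionFileSystem(442) "Committing ... as ..." line) before adding it to the store. Conceptually the commit step is a filesystem rename; the following is a minimal, hypothetical Hadoop FileSystem sketch of that write-to-tmp-then-commit pattern, with shortened paths, and is not the actual HBase internal code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // A new store file is first written under the region's .tmp directory...
    Path tmp = new Path("/data/hbase/meta/1588230740/.tmp/ns/f7fc02d24e66458cbe95a9e943ac2696");
    // ...and only moved into the column family directory once it is complete.
    Path dst = new Path("/data/hbase/meta/1588230740/ns/f7fc02d24e66458cbe95a9e943ac2696");
    fs.mkdirs(dst.getParent());
    boolean committed = fs.rename(tmp, dst);
    System.out.println("committed=" + committed);
  }
}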
2024-12-03T04:58:42,855 INFO [RS:0;32beccd76b35:34955 {}] regionserver.HRegionServer(976): stopping server 32beccd76b35,34955,1733201921396; all regions closed. 2024-12-03T04:58:42,856 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:42,856 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:42,856 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:42,856 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:42,857 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:42,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37485 is added to blk_1073741834_1010 (size=1152) 2024-12-03T04:58:42,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33875 is added to blk_1073741834_1010 (size=1152) 2024-12-03T04:58:42,863 DEBUG [RS:0;32beccd76b35:34955 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/oldWALs 2024-12-03T04:58:42,863 INFO [RS:0;32beccd76b35:34955 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 32beccd76b35%2C34955%2C1733201921396.meta:.meta(num 1733201922458) 2024-12-03T04:58:42,864 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:42,864 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:42,864 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:42,864 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:42,864 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:42,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37485 is added to blk_1073741833_1009 (size=93) 2024-12-03T04:58:42,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33875 is added to blk_1073741833_1009 (size=93) 2024-12-03T04:58:42,869 DEBUG [RS:0;32beccd76b35:34955 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/oldWALs 2024-12-03T04:58:42,869 INFO [RS:0;32beccd76b35:34955 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 32beccd76b35%2C34955%2C1733201921396:(num 1733201921929) 2024-12-03T04:58:42,869 DEBUG [RS:0;32beccd76b35:34955 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T04:58:42,869 INFO [RS:0;32beccd76b35:34955 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T04:58:42,869 INFO [RS:0;32beccd76b35:34955 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T04:58:42,870 INFO [RS:0;32beccd76b35:34955 {}] hbase.ChoreService(370): Chore service for: regionserver/32beccd76b35:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-03T04:58:42,870 INFO [RS:0;32beccd76b35:34955 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T04:58:42,870 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
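The AbstractFSWAL lines above show the region server closing its WALs and archiving them to the oldWALs directory as part of shutdown. In the log-rolling tests the same archival also follows an explicit roll; below is a short, hypothetical client-side sketch of requesting a roll through the Admin API, which is not the code path used here (here the roll happens during shutdown):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WalRollSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask every region server to roll its current WAL; the closed file then becomes
      // eligible to be moved to the oldWALs directory, as in the log lines above.
      for (ServerName sn : admin.getRegionServers()) {
        admin.rollWALWriter(sn);
      }
    }
  }
}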
2024-12-03T04:58:42,870 INFO [RS:0;32beccd76b35:34955 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34955 2024-12-03T04:58:42,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34955-0x101517579be0001, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/32beccd76b35,34955,1733201921396 2024-12-03T04:58:42,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T04:58:42,872 INFO [RS:0;32beccd76b35:34955 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T04:58:42,875 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [32beccd76b35,34955,1733201921396] 2024-12-03T04:58:42,876 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/32beccd76b35,34955,1733201921396 already deleted, retry=false 2024-12-03T04:58:42,876 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 32beccd76b35,34955,1733201921396 expired; onlineServers=0 2024-12-03T04:58:42,876 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '32beccd76b35,45277,1733201921337' ***** 2024-12-03T04:58:42,876 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T04:58:42,876 INFO [M:0;32beccd76b35:45277 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T04:58:42,877 INFO [M:0;32beccd76b35:45277 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T04:58:42,877 DEBUG [M:0;32beccd76b35:45277 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T04:58:42,877 DEBUG [M:0;32beccd76b35:45277 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T04:58:42,877 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-03T04:58:42,877 DEBUG [master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.small.0-1733201921668 {}] cleaner.HFileCleaner(306): Exit Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.small.0-1733201921668,5,FailOnTimeoutGroup] 2024-12-03T04:58:42,877 DEBUG [master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.large.0-1733201921667 {}] cleaner.HFileCleaner(306): Exit Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.large.0-1733201921667,5,FailOnTimeoutGroup] 2024-12-03T04:58:42,877 INFO [M:0;32beccd76b35:45277 {}] hbase.ChoreService(370): Chore service for: master/32beccd76b35:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T04:58:42,877 INFO [M:0;32beccd76b35:45277 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T04:58:42,877 DEBUG [M:0;32beccd76b35:45277 {}] master.HMaster(1795): Stopping service threads 2024-12-03T04:58:42,877 INFO [M:0;32beccd76b35:45277 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T04:58:42,877 INFO [M:0;32beccd76b35:45277 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T04:58:42,878 INFO [M:0;32beccd76b35:45277 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T04:58:42,878 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T04:58:42,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T04:58:42,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:42,879 DEBUG [M:0;32beccd76b35:45277 {}] zookeeper.ZKUtil(347): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-03T04:58:42,879 WARN [M:0;32beccd76b35:45277 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-03T04:58:42,880 INFO [M:0;32beccd76b35:45277 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/.lastflushedseqids 2024-12-03T04:58:42,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33875 is added to blk_1073741836_1012 (size=99) 2024-12-03T04:58:42,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37485 is added to blk_1073741836_1012 (size=99) 2024-12-03T04:58:42,887 INFO [M:0;32beccd76b35:45277 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-03T04:58:42,887 INFO [M:0;32beccd76b35:45277 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T04:58:42,887 DEBUG [M:0;32beccd76b35:45277 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T04:58:42,887 INFO [M:0;32beccd76b35:45277 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T04:58:42,887 DEBUG [M:0;32beccd76b35:45277 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T04:58:42,887 DEBUG [M:0;32beccd76b35:45277 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T04:58:42,887 DEBUG [M:0;32beccd76b35:45277 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T04:58:42,887 INFO [M:0;32beccd76b35:45277 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-03T04:58:42,906 DEBUG [M:0;32beccd76b35:45277 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ca1e2900cc604bae813a602c20241713 is 82, key is hbase:meta,,1/info:regioninfo/1733201922495/Put/seqid=0 2024-12-03T04:58:42,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37485 is added to blk_1073741837_1013 (size=5672) 2024-12-03T04:58:42,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33875 is added to blk_1073741837_1013 (size=5672) 2024-12-03T04:58:42,914 INFO [M:0;32beccd76b35:45277 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ca1e2900cc604bae813a602c20241713 2024-12-03T04:58:42,944 DEBUG [M:0;32beccd76b35:45277 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4f7919f3024c433c9e3857a5eb62c86b is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733201922523/Put/seqid=0 2024-12-03T04:58:42,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33875 is added to blk_1073741838_1014 (size=5275) 2024-12-03T04:58:42,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37485 is added to blk_1073741838_1014 (size=5275) 2024-12-03T04:58:42,975 INFO [RS:0;32beccd76b35:34955 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T04:58:42,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34955-0x101517579be0001, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T04:58:42,975 INFO [RS:0;32beccd76b35:34955 {}] regionserver.HRegionServer(1031): Exiting; stopping=32beccd76b35,34955,1733201921396; zookeeper connection closed. 
2024-12-03T04:58:42,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34955-0x101517579be0001, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T04:58:42,975 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7cf21c7f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7cf21c7f 2024-12-03T04:58:42,976 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-03T04:58:43,356 INFO [M:0;32beccd76b35:45277 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4f7919f3024c433c9e3857a5eb62c86b 2024-12-03T04:58:43,380 DEBUG [M:0;32beccd76b35:45277 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7b076315aefe46d6b64e114554ca6325 is 69, key is 32beccd76b35,34955,1733201921396/rs:state/1733201921758/Put/seqid=0 2024-12-03T04:58:43,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37485 is added to blk_1073741839_1015 (size=5156) 2024-12-03T04:58:43,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33875 is added to blk_1073741839_1015 (size=5156) 2024-12-03T04:58:43,387 INFO [M:0;32beccd76b35:45277 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7b076315aefe46d6b64e114554ca6325 2024-12-03T04:58:43,413 DEBUG [M:0;32beccd76b35:45277 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/213938129f2341ba9dc69c19c34bb678 is 52, key is load_balancer_on/state:d/1733201922648/Put/seqid=0 2024-12-03T04:58:43,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37485 is added to blk_1073741840_1016 (size=5056) 2024-12-03T04:58:43,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33875 is added to blk_1073741840_1016 (size=5056) 2024-12-03T04:58:43,419 INFO [M:0;32beccd76b35:45277 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/213938129f2341ba9dc69c19c34bb678 2024-12-03T04:58:43,426 DEBUG [M:0;32beccd76b35:45277 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ca1e2900cc604bae813a602c20241713 as 
hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ca1e2900cc604bae813a602c20241713 2024-12-03T04:58:43,434 INFO [M:0;32beccd76b35:45277 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ca1e2900cc604bae813a602c20241713, entries=8, sequenceid=29, filesize=5.5 K 2024-12-03T04:58:43,436 DEBUG [M:0;32beccd76b35:45277 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/4f7919f3024c433c9e3857a5eb62c86b as hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4f7919f3024c433c9e3857a5eb62c86b 2024-12-03T04:58:43,442 INFO [M:0;32beccd76b35:45277 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/4f7919f3024c433c9e3857a5eb62c86b, entries=3, sequenceid=29, filesize=5.2 K 2024-12-03T04:58:43,443 DEBUG [M:0;32beccd76b35:45277 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7b076315aefe46d6b64e114554ca6325 as hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7b076315aefe46d6b64e114554ca6325 2024-12-03T04:58:43,449 INFO [M:0;32beccd76b35:45277 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7b076315aefe46d6b64e114554ca6325, entries=1, sequenceid=29, filesize=5.0 K 2024-12-03T04:58:43,451 DEBUG [M:0;32beccd76b35:45277 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/213938129f2341ba9dc69c19c34bb678 as hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/213938129f2341ba9dc69c19c34bb678 2024-12-03T04:58:43,457 INFO [M:0;32beccd76b35:45277 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41733/user/jenkins/test-data/631ebe43-4ab8-d7ab-b0b7-2c68e78b7aea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/213938129f2341ba9dc69c19c34bb678, entries=1, sequenceid=29, filesize=4.9 K 2024-12-03T04:58:43,460 INFO [M:0;32beccd76b35:45277 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 573ms, sequenceid=29, compaction requested=false 2024-12-03T04:58:43,466 INFO [M:0;32beccd76b35:45277 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T04:58:43,466 DEBUG [M:0;32beccd76b35:45277 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733201922887Disabling compacts and flushes for region at 1733201922887Disabling writes for close at 1733201922887Obtaining lock to block concurrent updates at 1733201922887Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733201922887Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733201922888 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733201922889 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733201922889Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733201922905 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733201922905Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733201922921 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733201922944 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733201922944Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733201923363 (+419 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733201923380 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733201923380Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733201923394 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733201923412 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733201923412Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@51695bf3: reopening flushed file at 1733201923425 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e95392d: reopening flushed file at 1733201923434 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@c3873a4: reopening flushed file at 1733201923442 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@144028cb: reopening flushed file at 1733201923450 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 573ms, sequenceid=29, compaction requested=false at 1733201923460 (+10 ms)Writing region close event to WAL at 1733201923466 (+6 ms)Closed at 1733201923466 2024-12-03T04:58:43,467 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:43,467 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:43,467 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:43,467 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:43,468 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:43,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37485 is added to blk_1073741830_1006 (size=10311) 2024-12-03T04:58:43,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33875 is added to blk_1073741830_1006 (size=10311) 2024-12-03T04:58:43,471 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T04:58:43,471 INFO [M:0;32beccd76b35:45277 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-03T04:58:43,471 INFO [M:0;32beccd76b35:45277 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:45277 2024-12-03T04:58:43,472 INFO [M:0;32beccd76b35:45277 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T04:58:43,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T04:58:43,577 INFO [M:0;32beccd76b35:45277 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T04:58:43,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45277-0x101517579be0000, quorum=127.0.0.1:62804, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T04:58:43,586 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7d7dc570{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:58:43,586 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@f926f6b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T04:58:43,587 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T04:58:43,587 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19a8d3ee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T04:58:43,587 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e48ac18{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/hadoop.log.dir/,STOPPED} 2024-12-03T04:58:43,589 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T04:58:43,589 WARN [BP-7222034-172.17.0.3-1733201920489 heartbeating to localhost/127.0.0.1:41733 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T04:58:43,589 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T04:58:43,590 WARN [BP-7222034-172.17.0.3-1733201920489 heartbeating to localhost/127.0.0.1:41733 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-7222034-172.17.0.3-1733201920489 (Datanode Uuid 5590a3aa-ed19-46f1-a63c-86507005d77b) service to localhost/127.0.0.1:41733 2024-12-03T04:58:43,590 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/cluster_e37b92ff-78c9-ecad-43ef-53347177872c/data/data3/current/BP-7222034-172.17.0.3-1733201920489 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:58:43,591 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/cluster_e37b92ff-78c9-ecad-43ef-53347177872c/data/data4/current/BP-7222034-172.17.0.3-1733201920489 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:58:43,591 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T04:58:43,594 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4227a026{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:58:43,594 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5c18786c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T04:58:43,595 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T04:58:43,595 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78694030{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T04:58:43,595 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ee6bbfa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/hadoop.log.dir/,STOPPED} 2024-12-03T04:58:43,597 WARN [BP-7222034-172.17.0.3-1733201920489 heartbeating to localhost/127.0.0.1:41733 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T04:58:43,597 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T04:58:43,597 WARN [BP-7222034-172.17.0.3-1733201920489 heartbeating to localhost/127.0.0.1:41733 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-7222034-172.17.0.3-1733201920489 (Datanode Uuid 0491666b-7ba0-4ec6-adbe-4a6a9f0bb4d9) service to localhost/127.0.0.1:41733 2024-12-03T04:58:43,597 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T04:58:43,598 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/cluster_e37b92ff-78c9-ecad-43ef-53347177872c/data/data1/current/BP-7222034-172.17.0.3-1733201920489 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:58:43,598 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/cluster_e37b92ff-78c9-ecad-43ef-53347177872c/data/data2/current/BP-7222034-172.17.0.3-1733201920489 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:58:43,598 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T04:58:43,605 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3715268d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T04:58:43,606 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5f9bf265{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T04:58:43,606 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T04:58:43,606 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@715839fc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T04:58:43,606 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2db53ea5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/hadoop.log.dir/,STOPPED} 2024-12-03T04:58:43,614 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-03T04:58:43,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-03T04:58:43,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T04:58:43,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/hadoop.log.dir so I do NOT create it in target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32 2024-12-03T04:58:43,638 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b4cc9e07-ecaa-1671-25e5-156d00660ab7/hadoop.tmp.dir so I do NOT create it in target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32 2024-12-03T04:58:43,638 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db, deleteOnExit=true 2024-12-03T04:58:43,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-03T04:58:43,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/test.cache.data in system properties and HBase conf 2024-12-03T04:58:43,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T04:58:43,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/hadoop.log.dir in system properties and HBase conf 2024-12-03T04:58:43,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T04:58:43,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T04:58:43,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-03T04:58:43,639 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-03T04:58:43,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T04:58:43,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T04:58:43,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T04:58:43,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T04:58:43,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T04:58:43,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T04:58:43,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T04:58:43,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T04:58:43,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T04:58:43,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/nfs.dump.dir in system properties and HBase conf 2024-12-03T04:58:43,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/java.io.tmpdir in system properties and HBase conf 2024-12-03T04:58:43,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T04:58:43,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T04:58:43,640 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T04:58:43,656 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T04:58:43,747 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T04:58:43,754 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T04:58:43,759 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T04:58:43,759 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T04:58:43,760 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T04:58:43,761 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T04:58:43,762 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f3a2d0b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/hadoop.log.dir/,AVAILABLE} 2024-12-03T04:58:43,762 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a18dcd3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T04:58:43,777 INFO [regionserver/32beccd76b35:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T04:58:43,899 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:58:43,903 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b7ed0d6{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/java.io.tmpdir/jetty-localhost-44883-hadoop-hdfs-3_4_1-tests_jar-_-any-6412644228111401150/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T04:58:43,904 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3a9c918{HTTP/1.1, (http/1.1)}{localhost:44883} 2024-12-03T04:58:43,904 INFO [Time-limited test {}] server.Server(415): Started @108066ms 2024-12-03T04:58:43,905 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:58:43,930 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T04:58:43,999 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T04:58:44,002 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:58:44,021 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:58:44,024 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:58:44,025 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:58:44,041 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T04:58:44,046 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T04:58:44,052 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T04:58:44,052 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T04:58:44,053 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T04:58:44,053 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@795b72e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/hadoop.log.dir/,AVAILABLE} 2024-12-03T04:58:44,054 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6277b6b4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T04:58:44,196 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@36912d0f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/java.io.tmpdir/jetty-localhost-40253-hadoop-hdfs-3_4_1-tests_jar-_-any-14978694828031105314/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:58:44,196 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@253e01a{HTTP/1.1, (http/1.1)}{localhost:40253} 2024-12-03T04:58:44,196 INFO [Time-limited test {}] server.Server(415): Started @108358ms 2024-12-03T04:58:44,198 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T04:58:44,240 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T04:58:44,243 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T04:58:44,244 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T04:58:44,244 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T04:58:44,244 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T04:58:44,245 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5aaecb38{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/hadoop.log.dir/,AVAILABLE} 2024-12-03T04:58:44,245 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47d3f760{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T04:58:44,312 WARN [Thread-658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data1/current/BP-851166150-172.17.0.3-1733201923677/current, will proceed with Du for space computation calculation, 2024-12-03T04:58:44,312 WARN [Thread-659 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data2/current/BP-851166150-172.17.0.3-1733201923677/current, will proceed with Du for space computation calculation, 2024-12-03T04:58:44,341 WARN [Thread-637 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T04:58:44,344 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x598797ec5840e62a with lease ID 0x9c1667867a4b1487: Processing first storage report for DS-1d3a0383-99f1-4153-9174-40e3212a865e from datanode DatanodeRegistration(127.0.0.1:45599, datanodeUuid=65c66e99-3abb-4257-b6c4-a79e2df13ead, infoPort=44321, infoSecurePort=0, ipcPort=44181, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677) 2024-12-03T04:58:44,345 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x598797ec5840e62a with lease ID 0x9c1667867a4b1487: from storage DS-1d3a0383-99f1-4153-9174-40e3212a865e node DatanodeRegistration(127.0.0.1:45599, datanodeUuid=65c66e99-3abb-4257-b6c4-a79e2df13ead, infoPort=44321, infoSecurePort=0, ipcPort=44181, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T04:58:44,345 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x598797ec5840e62a with lease ID 0x9c1667867a4b1487: Processing first storage report for DS-526f50b1-eda4-4bf3-b0ce-a5210df2e1fb from datanode DatanodeRegistration(127.0.0.1:45599, datanodeUuid=65c66e99-3abb-4257-b6c4-a79e2df13ead, infoPort=44321, infoSecurePort=0, ipcPort=44181, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677) 2024-12-03T04:58:44,345 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x598797ec5840e62a with lease ID 0x9c1667867a4b1487: from storage DS-526f50b1-eda4-4bf3-b0ce-a5210df2e1fb node DatanodeRegistration(127.0.0.1:45599, datanodeUuid=65c66e99-3abb-4257-b6c4-a79e2df13ead, infoPort=44321, infoSecurePort=0, ipcPort=44181, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T04:58:44,403 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7748d0b0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/java.io.tmpdir/jetty-localhost-35037-hadoop-hdfs-3_4_1-tests_jar-_-any-14795091160579705963/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:58:44,404 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@29fc42c9{HTTP/1.1, (http/1.1)}{localhost:35037} 2024-12-03T04:58:44,404 INFO [Time-limited test {}] server.Server(415): Started @108566ms 2024-12-03T04:58:44,406 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
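The StartMiniClusterOption recorded earlier in this startup (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1) is what drives the DFS, datanode and ZooKeeper bring-up traced in the entries above. As a hedged illustration only, a test would typically request that topology through the HBase test utility named in the log roughly as follows; the class and builder names are the standard HBase test API, exact signatures vary across branches, and this is not the source of the test that produced this log.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterStartupSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Topology matching the logged option: 1 master, 1 regionserver,
        // 2 datanodes, 1 ZooKeeper server.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);   // drives the "STARTING DFS" / datanode startup seen above
        try {
          // test body would run against the mini cluster here
        } finally {
          util.shutdownMiniCluster();    // produces the "Minicluster is down" entry
        }
      }
    }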
2024-12-03T04:58:44,536 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data3/current/BP-851166150-172.17.0.3-1733201923677/current, will proceed with Du for space computation calculation, 2024-12-03T04:58:44,536 WARN [Thread-685 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data4/current/BP-851166150-172.17.0.3-1733201923677/current, will proceed with Du for space computation calculation, 2024-12-03T04:58:44,574 WARN [Thread-673 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T04:58:44,577 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb5b5807913cb08f9 with lease ID 0x9c1667867a4b1488: Processing first storage report for DS-f1b292ff-4c29-4e13-be8c-722756eca30c from datanode DatanodeRegistration(127.0.0.1:38707, datanodeUuid=b8f66bf4-78e5-4a33-b801-cf0b5bb96189, infoPort=46347, infoSecurePort=0, ipcPort=42299, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677) 2024-12-03T04:58:44,577 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb5b5807913cb08f9 with lease ID 0x9c1667867a4b1488: from storage DS-f1b292ff-4c29-4e13-be8c-722756eca30c node DatanodeRegistration(127.0.0.1:38707, datanodeUuid=b8f66bf4-78e5-4a33-b801-cf0b5bb96189, infoPort=46347, infoSecurePort=0, ipcPort=42299, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T04:58:44,577 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb5b5807913cb08f9 with lease ID 0x9c1667867a4b1488: Processing first storage report for DS-e2141e60-29f6-4d6d-9646-76d81ab1d44d from datanode DatanodeRegistration(127.0.0.1:38707, datanodeUuid=b8f66bf4-78e5-4a33-b801-cf0b5bb96189, infoPort=46347, infoSecurePort=0, ipcPort=42299, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677) 2024-12-03T04:58:44,577 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb5b5807913cb08f9 with lease ID 0x9c1667867a4b1488: from storage DS-e2141e60-29f6-4d6d-9646-76d81ab1d44d node DatanodeRegistration(127.0.0.1:38707, datanodeUuid=b8f66bf4-78e5-4a33-b801-cf0b5bb96189, infoPort=46347, infoSecurePort=0, ipcPort=42299, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T04:58:44,657 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32 2024-12-03T04:58:44,668 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/zookeeper_0, clientPort=50617, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T04:58:44,669 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50617 2024-12-03T04:58:44,669 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:58:44,671 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:58:44,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38707 is added to blk_1073741825_1001 (size=7) 2024-12-03T04:58:44,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45599 is added to blk_1073741825_1001 (size=7) 2024-12-03T04:58:44,715 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0 with version=8 2024-12-03T04:58:44,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/hbase-staging 2024-12-03T04:58:44,719 INFO [Time-limited test {}] client.ConnectionUtils(128): master/32beccd76b35:0 server-side Connection retries=45 2024-12-03T04:58:44,719 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T04:58:44,719 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T04:58:44,719 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T04:58:44,719 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T04:58:44,719 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T04:58:44,719 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-03T04:58:44,719 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T04:58:44,742 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:35531 2024-12-03T04:58:44,744 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35531 connecting to ZooKeeper ensemble=127.0.0.1:50617 2024-12-03T04:58:44,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:355310x0, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T04:58:44,757 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35531-0x101517586e10000 connected 2024-12-03T04:58:44,782 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:58:44,784 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:58:44,787 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T04:58:44,787 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0, hbase.cluster.distributed=false 2024-12-03T04:58:44,789 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T04:58:44,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35531 2024-12-03T04:58:44,794 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35531 2024-12-03T04:58:44,794 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35531 2024-12-03T04:58:44,801 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35531 2024-12-03T04:58:44,803 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35531 2024-12-03T04:58:44,824 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/32beccd76b35:0 server-side Connection retries=45 2024-12-03T04:58:44,825 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T04:58:44,825 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T04:58:44,825 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T04:58:44,825 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T04:58:44,825 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T04:58:44,825 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T04:58:44,825 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T04:58:44,826 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:44203 2024-12-03T04:58:44,828 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44203 connecting to ZooKeeper ensemble=127.0.0.1:50617 2024-12-03T04:58:44,828 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:58:44,831 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:58:44,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:442030x0, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T04:58:44,837 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44203-0x101517586e10001 connected 2024-12-03T04:58:44,837 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44203-0x101517586e10001, quorum=127.0.0.1:50617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T04:58:44,837 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T04:58:44,838 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T04:58:44,839 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44203-0x101517586e10001, quorum=127.0.0.1:50617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T04:58:44,840 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44203-0x101517586e10001, quorum=127.0.0.1:50617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T04:58:44,841 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44203 2024-12-03T04:58:44,841 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44203 2024-12-03T04:58:44,849 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44203 2024-12-03T04:58:44,853 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44203 2024-12-03T04:58:44,854 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44203 
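The ipc.RpcExecutor entries above show each RPC queue being created with handlerCount=3 and maxQueueLength=30 on both the master (port 35531) and the regionserver (port 44203). Those sizes map onto standard HBase configuration keys; whether this test sets them explicitly or simply inherits defaults is not visible in the log, so the snippet below is only a sketch of how a small test configuration could pin them down.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcHandlerConfigSketch {
      // Illustrative only: small RPC pools for a single-node test cluster.
      public static Configuration smallRpcPools() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.regionserver.handler.count", 3);        // compare handlerCount=3 above
        conf.setInt("hbase.ipc.server.max.callqueue.length", 30);  // compare maxQueueLength=30 above
        return conf;
      }
    }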
2024-12-03T04:58:44,869 DEBUG [M:0;32beccd76b35:35531 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;32beccd76b35:35531 2024-12-03T04:58:44,869 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/32beccd76b35,35531,1733201924718 2024-12-03T04:58:44,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44203-0x101517586e10001, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T04:58:44,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T04:58:44,872 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/32beccd76b35,35531,1733201924718 2024-12-03T04:58:44,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44203-0x101517586e10001, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T04:58:44,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:44,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44203-0x101517586e10001, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:44,875 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T04:58:44,875 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/32beccd76b35,35531,1733201924718 from backup master directory 2024-12-03T04:58:44,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/32beccd76b35,35531,1733201924718 2024-12-03T04:58:44,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44203-0x101517586e10001, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T04:58:44,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T04:58:44,877 WARN [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-03T04:58:44,877 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=32beccd76b35,35531,1733201924718 2024-12-03T04:58:44,887 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/hbase.id] with ID: 46411542-9ab6-4669-ba81-c8b9b89e0eb7 2024-12-03T04:58:44,887 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/.tmp/hbase.id 2024-12-03T04:58:44,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45599 is added to blk_1073741826_1002 (size=42) 2024-12-03T04:58:44,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38707 is added to blk_1073741826_1002 (size=42) 2024-12-03T04:58:44,902 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/.tmp/hbase.id]:[hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/hbase.id] 2024-12-03T04:58:44,920 INFO [master/32beccd76b35:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:58:44,920 INFO [master/32beccd76b35:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-03T04:58:44,922 INFO [master/32beccd76b35:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
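The FSUtils entries above record the cluster ID being written to a temporary file (.tmp/hbase.id) and then moved to its final location (hbase.id), a standard write-then-rename publish on HDFS. The sketch below shows that pattern with the plain Hadoop FileSystem API; it is an illustration under the assumption of a simple string payload, while the real FSUtils serializes the ID in its own format.

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdPublishSketch {
      // Write the ID to a temp path first, then rename so readers never see a partial file.
      public static void publish(FileSystem fs, Path rootDir, String clusterId) throws Exception {
        Path tmp = new Path(rootDir, ".tmp/hbase.id");
        Path dst = new Path(rootDir, "hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        if (!fs.rename(tmp, dst)) {
          throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
        }
      }
    }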
2024-12-03T04:58:44,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:44,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44203-0x101517586e10001, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:44,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38707 is added to blk_1073741827_1003 (size=196) 2024-12-03T04:58:44,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45599 is added to blk_1073741827_1003 (size=196) 2024-12-03T04:58:44,937 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T04:58:44,938 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T04:58:44,939 INFO [master/32beccd76b35:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T04:58:44,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45599 is added to blk_1073741828_1004 (size=1189) 2024-12-03T04:58:44,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38707 is added to blk_1073741828_1004 (size=1189) 2024-12-03T04:58:44,950 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store 2024-12-03T04:58:44,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38707 is added to blk_1073741829_1005 (size=34) 2024-12-03T04:58:44,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45599 is added to blk_1073741829_1005 (size=34) 2024-12-03T04:58:44,968 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T04:58:44,968 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T04:58:44,968 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T04:58:44,968 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T04:58:44,968 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T04:58:44,968 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T04:58:44,968 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
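The master:store table descriptor dumped above (families info, proc, rs and state, with info held in-memory at 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom filter and 8 KB blocks) is constructed internally by MasterRegion. Purely to make the logged attributes concrete, an equivalent descriptor expressed in the public client API might look like the sketch below; it is illustrative and not the code that produced these entries.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static TableDescriptor build() {
        // 'info' family as logged: 3 versions, ROW_INDEX_V1, ROWCOL bloom, in-memory, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build();
        // 'proc', 'rs' and 'state' carry the defaults shown in the log
        // (1 version, ROW bloom, 64 KB blocks), so plain families suffice here.
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
            .build();
      }
    }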
2024-12-03T04:58:44,969 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733201924968Disabling compacts and flushes for region at 1733201924968Disabling writes for close at 1733201924968Writing region close event to WAL at 1733201924968Closed at 1733201924968 2024-12-03T04:58:44,969 WARN [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store/.initializing 2024-12-03T04:58:44,970 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/WALs/32beccd76b35,35531,1733201924718 2024-12-03T04:58:44,973 INFO [master/32beccd76b35:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32beccd76b35%2C35531%2C1733201924718, suffix=, logDir=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/WALs/32beccd76b35,35531,1733201924718, archiveDir=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/oldWALs, maxLogs=10 2024-12-03T04:58:44,973 INFO [master/32beccd76b35:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C35531%2C1733201924718.1733201924973 2024-12-03T04:58:44,981 INFO [master/32beccd76b35:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/WALs/32beccd76b35,35531,1733201924718/32beccd76b35%2C35531%2C1733201924718.1733201924973 2024-12-03T04:58:44,983 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44321:44321),(127.0.0.1/127.0.0.1:46347:46347)] 2024-12-03T04:58:44,985 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T04:58:44,985 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T04:58:44,985 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:58:44,985 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:58:44,987 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:58:44,989 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T04:58:44,989 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:44,990 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:58:44,990 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:58:44,992 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T04:58:44,992 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:44,992 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T04:58:44,993 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:58:44,997 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T04:58:44,997 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:44,998 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T04:58:44,998 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:58:45,000 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T04:58:45,000 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:45,001 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T04:58:45,001 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:58:45,002 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:58:45,002 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:58:45,004 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:58:45,005 DEBUG [master/32beccd76b35:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:58:45,005 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T04:58:45,007 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:58:45,010 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T04:58:45,010 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=711095, jitterRate=-0.09579694271087646}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T04:58:45,012 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733201924985Initializing all the Stores at 1733201924987 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201924987Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733201924987Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733201924987Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733201924987Cleaning up temporary data from old regions at 1733201925005 (+18 ms)Region opened successfully at 1733201925012 (+7 ms) 2024-12-03T04:58:45,012 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T04:58:45,017 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f5a3c1f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=32beccd76b35/172.17.0.3:0 2024-12-03T04:58:45,018 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-03T04:58:45,018 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T04:58:45,018 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T04:58:45,018 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T04:58:45,019 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-03T04:58:45,019 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-03T04:58:45,019 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T04:58:45,022 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T04:58:45,023 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T04:58:45,025 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T04:58:45,025 INFO [master/32beccd76b35:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T04:58:45,026 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T04:58:45,027 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T04:58:45,028 INFO [master/32beccd76b35:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T04:58:45,030 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T04:58:45,031 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T04:58:45,032 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T04:58:45,034 DEBUG 
[master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T04:58:45,036 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T04:58:45,038 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T04:58:45,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T04:58:45,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44203-0x101517586e10001, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T04:58:45,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:45,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44203-0x101517586e10001, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:45,041 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=32beccd76b35,35531,1733201924718, sessionid=0x101517586e10000, setting cluster-up flag (Was=false) 2024-12-03T04:58:45,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44203-0x101517586e10001, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:45,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:45,053 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T04:58:45,055 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=32beccd76b35,35531,1733201924718 2024-12-03T04:58:45,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:45,060 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44203-0x101517586e10001, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:45,065 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T04:58:45,067 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=32beccd76b35,35531,1733201924718 2024-12-03T04:58:45,068 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T04:58:45,070 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T04:58:45,071 INFO [master/32beccd76b35:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T04:58:45,071 INFO [master/32beccd76b35:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-03T04:58:45,071 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 32beccd76b35,35531,1733201924718 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T04:58:45,073 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T04:58:45,073 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T04:58:45,073 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T04:58:45,073 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T04:58:45,073 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/32beccd76b35:0, corePoolSize=10, maxPoolSize=10 2024-12-03T04:58:45,073 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:45,073 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/32beccd76b35:0, corePoolSize=2, maxPoolSize=2 2024-12-03T04:58:45,073 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/32beccd76b35:0, corePoolSize=1, 
maxPoolSize=1 2024-12-03T04:58:45,074 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733201955074 2024-12-03T04:58:45,074 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T04:58:45,074 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T04:58:45,074 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T04:58:45,074 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T04:58:45,074 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T04:58:45,074 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T04:58:45,075 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:45,075 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T04:58:45,075 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T04:58:45,075 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T04:58:45,075 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T04:58:45,075 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T04:58:45,076 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T04:58:45,076 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T04:58:45,076 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.large.0-1733201925076,5,FailOnTimeoutGroup] 2024-12-03T04:58:45,076 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.small.0-1733201925076,5,FailOnTimeoutGroup] 2024-12-03T04:58:45,076 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-03T04:58:45,077 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:45,077 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T04:58:45,077 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:45,077 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:45,077 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T04:58:45,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45599 is added to blk_1073741831_1007 (size=1321) 2024-12-03T04:58:45,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38707 is added to blk_1073741831_1007 (size=1321) 2024-12-03T04:58:45,087 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T04:58:45,087 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS 
=> 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0 2024-12-03T04:58:45,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38707 is added to blk_1073741832_1008 (size=32) 2024-12-03T04:58:45,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45599 is added to blk_1073741832_1008 (size=32) 2024-12-03T04:58:45,096 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T04:58:45,098 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T04:58:45,100 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T04:58:45,100 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:45,100 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:58:45,100 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T04:58:45,102 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T04:58:45,102 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:45,102 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:58:45,102 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T04:58:45,104 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T04:58:45,104 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:45,104 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:58:45,104 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T04:58:45,106 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T04:58:45,106 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:45,106 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:58:45,106 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T04:58:45,107 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/hbase/meta/1588230740 2024-12-03T04:58:45,108 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/hbase/meta/1588230740 2024-12-03T04:58:45,110 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T04:58:45,110 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T04:58:45,110 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-03T04:58:45,112 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T04:58:45,115 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T04:58:45,116 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=721062, jitterRate=-0.08312344551086426}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T04:58:45,117 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733201925097Initializing all the Stores at 1733201925098 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201925098Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201925098Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733201925098Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201925098Cleaning up temporary data from old regions at 1733201925110 (+12 ms)Region opened successfully at 1733201925117 (+7 ms) 2024-12-03T04:58:45,117 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T04:58:45,117 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T04:58:45,117 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T04:58:45,117 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T04:58:45,117 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T04:58:45,118 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T04:58:45,118 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733201925117Disabling compacts and flushes for region at 1733201925117Disabling writes for close at 1733201925117Writing region 
close event to WAL at 1733201925118 (+1 ms)Closed at 1733201925118 2024-12-03T04:58:45,120 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T04:58:45,120 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T04:58:45,120 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T04:58:45,122 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T04:58:45,124 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T04:58:45,156 INFO [RS:0;32beccd76b35:44203 {}] regionserver.HRegionServer(746): ClusterId : 46411542-9ab6-4669-ba81-c8b9b89e0eb7 2024-12-03T04:58:45,156 DEBUG [RS:0;32beccd76b35:44203 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T04:58:45,159 DEBUG [RS:0;32beccd76b35:44203 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T04:58:45,159 DEBUG [RS:0;32beccd76b35:44203 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T04:58:45,162 DEBUG [RS:0;32beccd76b35:44203 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T04:58:45,163 DEBUG [RS:0;32beccd76b35:44203 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45a3a8d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=32beccd76b35/172.17.0.3:0 2024-12-03T04:58:45,178 DEBUG [RS:0;32beccd76b35:44203 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;32beccd76b35:44203 2024-12-03T04:58:45,179 INFO [RS:0;32beccd76b35:44203 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T04:58:45,179 INFO [RS:0;32beccd76b35:44203 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T04:58:45,179 DEBUG [RS:0;32beccd76b35:44203 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-03T04:58:45,180 INFO [RS:0;32beccd76b35:44203 {}] regionserver.HRegionServer(2659): reportForDuty to master=32beccd76b35,35531,1733201924718 with port=44203, startcode=1733201924824 2024-12-03T04:58:45,180 DEBUG [RS:0;32beccd76b35:44203 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T04:58:45,183 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:55347, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T04:58:45,184 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35531 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 32beccd76b35,44203,1733201924824 2024-12-03T04:58:45,184 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35531 {}] master.ServerManager(517): Registering regionserver=32beccd76b35,44203,1733201924824 2024-12-03T04:58:45,186 DEBUG [RS:0;32beccd76b35:44203 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0 2024-12-03T04:58:45,186 DEBUG [RS:0;32beccd76b35:44203 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38443 2024-12-03T04:58:45,186 DEBUG [RS:0;32beccd76b35:44203 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T04:58:45,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T04:58:45,189 DEBUG [RS:0;32beccd76b35:44203 {}] zookeeper.ZKUtil(111): regionserver:44203-0x101517586e10001, quorum=127.0.0.1:50617, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/32beccd76b35,44203,1733201924824 2024-12-03T04:58:45,189 WARN [RS:0;32beccd76b35:44203 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T04:58:45,189 INFO [RS:0;32beccd76b35:44203 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T04:58:45,189 DEBUG [RS:0;32beccd76b35:44203 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824 2024-12-03T04:58:45,195 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [32beccd76b35,44203,1733201924824] 2024-12-03T04:58:45,199 INFO [RS:0;32beccd76b35:44203 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T04:58:45,202 INFO [RS:0;32beccd76b35:44203 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T04:58:45,204 INFO [RS:0;32beccd76b35:44203 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T04:58:45,204 INFO [RS:0;32beccd76b35:44203 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-03T04:58:45,205 INFO [RS:0;32beccd76b35:44203 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T04:58:45,206 INFO [RS:0;32beccd76b35:44203 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T04:58:45,206 INFO [RS:0;32beccd76b35:44203 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:45,207 DEBUG [RS:0;32beccd76b35:44203 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:45,207 DEBUG [RS:0;32beccd76b35:44203 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:45,207 DEBUG [RS:0;32beccd76b35:44203 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:45,207 DEBUG [RS:0;32beccd76b35:44203 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:45,207 DEBUG [RS:0;32beccd76b35:44203 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:45,207 DEBUG [RS:0;32beccd76b35:44203 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/32beccd76b35:0, corePoolSize=2, maxPoolSize=2 2024-12-03T04:58:45,207 DEBUG [RS:0;32beccd76b35:44203 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:45,207 DEBUG [RS:0;32beccd76b35:44203 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:45,207 DEBUG [RS:0;32beccd76b35:44203 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:45,207 DEBUG [RS:0;32beccd76b35:44203 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:45,207 DEBUG [RS:0;32beccd76b35:44203 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:45,207 DEBUG [RS:0;32beccd76b35:44203 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:45,207 DEBUG [RS:0;32beccd76b35:44203 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/32beccd76b35:0, corePoolSize=3, maxPoolSize=3 2024-12-03T04:58:45,207 DEBUG [RS:0;32beccd76b35:44203 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0, corePoolSize=3, maxPoolSize=3 2024-12-03T04:58:45,210 INFO [RS:0;32beccd76b35:44203 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-03T04:58:45,210 INFO [RS:0;32beccd76b35:44203 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:45,210 INFO [RS:0;32beccd76b35:44203 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:45,210 INFO [RS:0;32beccd76b35:44203 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:45,210 INFO [RS:0;32beccd76b35:44203 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:45,210 INFO [RS:0;32beccd76b35:44203 {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,44203,1733201924824-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T04:58:45,279 WARN [32beccd76b35:35531 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-03T04:58:45,286 INFO [RS:0;32beccd76b35:44203 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T04:58:45,287 INFO [RS:0;32beccd76b35:44203 {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,44203,1733201924824-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:45,287 INFO [RS:0;32beccd76b35:44203 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:45,287 INFO [RS:0;32beccd76b35:44203 {}] regionserver.Replication(171): 32beccd76b35,44203,1733201924824 started 2024-12-03T04:58:45,306 INFO [RS:0;32beccd76b35:44203 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T04:58:45,307 INFO [RS:0;32beccd76b35:44203 {}] regionserver.HRegionServer(1482): Serving as 32beccd76b35,44203,1733201924824, RpcServer on 32beccd76b35/172.17.0.3:44203, sessionid=0x101517586e10001 2024-12-03T04:58:45,307 DEBUG [RS:0;32beccd76b35:44203 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T04:58:45,307 DEBUG [RS:0;32beccd76b35:44203 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 32beccd76b35,44203,1733201924824 2024-12-03T04:58:45,307 DEBUG [RS:0;32beccd76b35:44203 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '32beccd76b35,44203,1733201924824' 2024-12-03T04:58:45,307 DEBUG [RS:0;32beccd76b35:44203 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T04:58:45,310 DEBUG [RS:0;32beccd76b35:44203 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T04:58:45,311 DEBUG [RS:0;32beccd76b35:44203 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T04:58:45,311 DEBUG [RS:0;32beccd76b35:44203 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T04:58:45,311 DEBUG [RS:0;32beccd76b35:44203 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 32beccd76b35,44203,1733201924824 2024-12-03T04:58:45,311 DEBUG [RS:0;32beccd76b35:44203 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '32beccd76b35,44203,1733201924824' 2024-12-03T04:58:45,311 DEBUG [RS:0;32beccd76b35:44203 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T04:58:45,311 DEBUG [RS:0;32beccd76b35:44203 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T04:58:45,312 DEBUG [RS:0;32beccd76b35:44203 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T04:58:45,312 INFO [RS:0;32beccd76b35:44203 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T04:58:45,312 INFO [RS:0;32beccd76b35:44203 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-03T04:58:45,415 INFO [RS:0;32beccd76b35:44203 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32beccd76b35%2C44203%2C1733201924824, suffix=, logDir=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824, archiveDir=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/oldWALs, maxLogs=32 2024-12-03T04:58:45,417 INFO [RS:0;32beccd76b35:44203 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C44203%2C1733201924824.1733201925416 2024-12-03T04:58:45,426 INFO [RS:0;32beccd76b35:44203 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201925416 2024-12-03T04:58:45,430 DEBUG [RS:0;32beccd76b35:44203 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44321:44321),(127.0.0.1/127.0.0.1:46347:46347)] 2024-12-03T04:58:45,529 DEBUG [32beccd76b35:35531 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-03T04:58:45,530 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=32beccd76b35,44203,1733201924824 2024-12-03T04:58:45,532 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 32beccd76b35,44203,1733201924824, state=OPENING 2024-12-03T04:58:45,533 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T04:58:45,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:45,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44203-0x101517586e10001, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:58:45,536 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T04:58:45,536 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T04:58:45,536 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T04:58:45,536 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=32beccd76b35,44203,1733201924824}] 2024-12-03T04:58:45,691 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T04:58:45,693 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53827, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T04:58:45,702 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T04:58:45,703 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T04:58:45,706 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32beccd76b35%2C44203%2C1733201924824.meta, suffix=.meta, logDir=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824, archiveDir=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/oldWALs, maxLogs=32 2024-12-03T04:58:45,707 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta 2024-12-03T04:58:45,720 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta 2024-12-03T04:58:45,727 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46347:46347),(127.0.0.1/127.0.0.1:44321:44321)] 2024-12-03T04:58:45,733 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T04:58:45,734 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T04:58:45,734 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T04:58:45,734 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-03T04:58:45,734 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T04:58:45,735 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T04:58:45,735 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T04:58:45,735 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T04:58:45,740 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T04:58:45,742 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T04:58:45,742 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:45,742 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:58:45,743 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T04:58:45,744 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T04:58:45,744 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:45,744 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:58:45,744 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T04:58:45,745 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T04:58:45,745 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:45,746 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:58:45,746 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T04:58:45,747 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T04:58:45,747 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:45,748 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-03T04:58:45,748 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T04:58:45,749 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/hbase/meta/1588230740 2024-12-03T04:58:45,751 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/hbase/meta/1588230740 2024-12-03T04:58:45,753 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T04:58:45,753 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T04:58:45,754 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T04:58:45,756 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T04:58:45,757 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=803184, jitterRate=0.02130192518234253}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T04:58:45,757 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T04:58:45,758 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733201925735Writing region info on filesystem at 1733201925735Initializing all the Stores at 1733201925737 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201925737Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201925740 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733201925740Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201925740Cleaning up temporary data from old regions at 1733201925753 (+13 ms)Running coprocessor post-open hooks at 1733201925757 (+4 ms)Region opened successfully at 1733201925758 (+1 ms) 2024-12-03T04:58:45,760 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733201925690 2024-12-03T04:58:45,763 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T04:58:45,763 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T04:58:45,764 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=32beccd76b35,44203,1733201924824 2024-12-03T04:58:45,766 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 32beccd76b35,44203,1733201924824, state=OPEN 2024-12-03T04:58:45,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T04:58:45,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44203-0x101517586e10001, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T04:58:45,775 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T04:58:45,775 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T04:58:45,775 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=32beccd76b35,44203,1733201924824 2024-12-03T04:58:45,784 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T04:58:45,784 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=32beccd76b35,44203,1733201924824 in 239 msec 2024-12-03T04:58:45,787 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T04:58:45,787 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 664 msec 2024-12-03T04:58:45,788 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T04:58:45,788 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T04:58:45,790 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T04:58:45,790 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=32beccd76b35,44203,1733201924824, seqNum=-1] 2024-12-03T04:58:45,791 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T04:58:45,792 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37671, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T04:58:45,803 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 731 msec 2024-12-03T04:58:45,803 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733201925803, completionTime=-1 2024-12-03T04:58:45,803 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-03T04:58:45,803 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-03T04:58:45,806 INFO [master/32beccd76b35:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-03T04:58:45,806 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733201985806 2024-12-03T04:58:45,806 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733202045806 2024-12-03T04:58:45,806 INFO [master/32beccd76b35:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-03T04:58:45,806 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,35531,1733201924718-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:45,806 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,35531,1733201924718-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:45,806 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,35531,1733201924718-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:45,806 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-32beccd76b35:35531, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T04:58:45,807 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:45,809 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:45,809 DEBUG [master/32beccd76b35:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T04:58:45,813 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.936sec 2024-12-03T04:58:45,813 INFO [master/32beccd76b35:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T04:58:45,813 INFO [master/32beccd76b35:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T04:58:45,813 INFO [master/32beccd76b35:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T04:58:45,814 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-03T04:58:45,814 INFO [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T04:58:45,814 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,35531,1733201924718-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T04:58:45,814 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,35531,1733201924718-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T04:58:45,821 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T04:58:45,821 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T04:58:45,821 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,35531,1733201924718-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T04:58:45,880 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ba7c5c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T04:58:45,880 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 32beccd76b35,35531,-1 for getting cluster id 2024-12-03T04:58:45,881 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T04:58:45,886 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '46411542-9ab6-4669-ba81-c8b9b89e0eb7' 2024-12-03T04:58:45,886 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T04:58:45,887 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "46411542-9ab6-4669-ba81-c8b9b89e0eb7" 2024-12-03T04:58:45,887 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d65fa4a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T04:58:45,887 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [32beccd76b35,35531,-1] 2024-12-03T04:58:45,887 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T04:58:45,891 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T04:58:45,893 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56556, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T04:58:45,894 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15977620, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T04:58:45,895 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T04:58:45,896 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=32beccd76b35,44203,1733201924824, seqNum=-1] 2024-12-03T04:58:45,896 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T04:58:45,898 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37526, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T04:58:45,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=32beccd76b35,35531,1733201924718 2024-12-03T04:58:45,901 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:58:45,904 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-03T04:58:45,920 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/32beccd76b35:0 server-side Connection retries=45 2024-12-03T04:58:45,920 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T04:58:45,920 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T04:58:45,920 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T04:58:45,920 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T04:58:45,920 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T04:58:45,920 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T04:58:45,920 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T04:58:45,921 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38103 2024-12-03T04:58:45,922 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38103 connecting to ZooKeeper ensemble=127.0.0.1:50617 2024-12-03T04:58:45,923 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:58:45,925 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:58:45,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:381030x0, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T04:58:45,930 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:381030x0, quorum=127.0.0.1:50617, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-03T04:58:45,930 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-03T04:58:45,930 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38103-0x101517586e10002 connected 2024-12-03T04:58:45,931 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T04:58:45,931 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T04:58:45,932 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:38103-0x101517586e10002, quorum=127.0.0.1:50617, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T04:58:45,933 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38103-0x101517586e10002, quorum=127.0.0.1:50617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T04:58:45,934 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38103 2024-12-03T04:58:45,934 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38103 2024-12-03T04:58:45,935 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38103 2024-12-03T04:58:45,935 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38103 2024-12-03T04:58:45,935 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38103 2024-12-03T04:58:45,937 INFO [RS:1;32beccd76b35:38103 {}] regionserver.HRegionServer(746): ClusterId : 46411542-9ab6-4669-ba81-c8b9b89e0eb7 2024-12-03T04:58:45,937 DEBUG [RS:1;32beccd76b35:38103 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T04:58:45,939 DEBUG [RS:1;32beccd76b35:38103 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T04:58:45,939 DEBUG [RS:1;32beccd76b35:38103 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T04:58:45,941 DEBUG [RS:1;32beccd76b35:38103 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T04:58:45,942 DEBUG [RS:1;32beccd76b35:38103 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c027df9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=32beccd76b35/172.17.0.3:0 2024-12-03T04:58:45,959 DEBUG [RS:1;32beccd76b35:38103 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;32beccd76b35:38103 2024-12-03T04:58:45,959 INFO [RS:1;32beccd76b35:38103 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T04:58:45,959 INFO [RS:1;32beccd76b35:38103 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T04:58:45,959 DEBUG [RS:1;32beccd76b35:38103 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-03T04:58:45,960 INFO [RS:1;32beccd76b35:38103 {}] regionserver.HRegionServer(2659): reportForDuty to master=32beccd76b35,35531,1733201924718 with port=38103, startcode=1733201925919 2024-12-03T04:58:45,960 DEBUG [RS:1;32beccd76b35:38103 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T04:58:45,963 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54219, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T04:58:45,963 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35531 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 32beccd76b35,38103,1733201925919 2024-12-03T04:58:45,963 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35531 {}] master.ServerManager(517): Registering regionserver=32beccd76b35,38103,1733201925919 2024-12-03T04:58:45,965 DEBUG [RS:1;32beccd76b35:38103 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0 2024-12-03T04:58:45,965 DEBUG [RS:1;32beccd76b35:38103 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38443 2024-12-03T04:58:45,965 DEBUG [RS:1;32beccd76b35:38103 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T04:58:45,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T04:58:45,967 DEBUG [RS:1;32beccd76b35:38103 {}] zookeeper.ZKUtil(111): regionserver:38103-0x101517586e10002, quorum=127.0.0.1:50617, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/32beccd76b35,38103,1733201925919 2024-12-03T04:58:45,968 WARN [RS:1;32beccd76b35:38103 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T04:58:45,968 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [32beccd76b35,38103,1733201925919] 2024-12-03T04:58:45,968 INFO [RS:1;32beccd76b35:38103 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T04:58:45,968 DEBUG [RS:1;32beccd76b35:38103 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919 2024-12-03T04:58:45,972 INFO [RS:1;32beccd76b35:38103 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T04:58:45,974 INFO [RS:1;32beccd76b35:38103 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T04:58:45,975 INFO [RS:1;32beccd76b35:38103 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T04:58:45,975 INFO [RS:1;32beccd76b35:38103 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-03T04:58:45,975 INFO [RS:1;32beccd76b35:38103 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T04:58:45,976 INFO [RS:1;32beccd76b35:38103 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T04:58:45,976 INFO [RS:1;32beccd76b35:38103 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:45,977 DEBUG [RS:1;32beccd76b35:38103 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:45,977 DEBUG [RS:1;32beccd76b35:38103 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:45,977 DEBUG [RS:1;32beccd76b35:38103 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:45,977 DEBUG [RS:1;32beccd76b35:38103 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:45,977 DEBUG [RS:1;32beccd76b35:38103 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:45,977 DEBUG [RS:1;32beccd76b35:38103 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/32beccd76b35:0, corePoolSize=2, maxPoolSize=2 2024-12-03T04:58:45,977 DEBUG [RS:1;32beccd76b35:38103 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:45,977 DEBUG [RS:1;32beccd76b35:38103 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:45,977 DEBUG [RS:1;32beccd76b35:38103 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:45,977 DEBUG [RS:1;32beccd76b35:38103 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:45,977 DEBUG [RS:1;32beccd76b35:38103 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:45,977 DEBUG [RS:1;32beccd76b35:38103 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:58:45,977 DEBUG [RS:1;32beccd76b35:38103 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/32beccd76b35:0, corePoolSize=3, maxPoolSize=3 2024-12-03T04:58:45,977 DEBUG [RS:1;32beccd76b35:38103 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0, corePoolSize=3, maxPoolSize=3 2024-12-03T04:58:45,978 INFO [RS:1;32beccd76b35:38103 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-03T04:58:45,978 INFO [RS:1;32beccd76b35:38103 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:45,978 INFO [RS:1;32beccd76b35:38103 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:45,978 INFO [RS:1;32beccd76b35:38103 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:45,978 INFO [RS:1;32beccd76b35:38103 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:45,978 INFO [RS:1;32beccd76b35:38103 {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,38103,1733201925919-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T04:58:46,002 INFO [RS:1;32beccd76b35:38103 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T04:58:46,002 INFO [RS:1;32beccd76b35:38103 {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,38103,1733201925919-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:46,003 INFO [RS:1;32beccd76b35:38103 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:46,003 INFO [RS:1;32beccd76b35:38103 {}] regionserver.Replication(171): 32beccd76b35,38103,1733201925919 started 2024-12-03T04:58:46,020 INFO [RS:1;32beccd76b35:38103 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T04:58:46,020 INFO [RS:1;32beccd76b35:38103 {}] regionserver.HRegionServer(1482): Serving as 32beccd76b35,38103,1733201925919, RpcServer on 32beccd76b35/172.17.0.3:38103, sessionid=0x101517586e10002 2024-12-03T04:58:46,020 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;32beccd76b35:38103,5,FailOnTimeoutGroup] 2024-12-03T04:58:46,021 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-12-03T04:58:46,021 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-03T04:58:46,022 DEBUG [RS:1;32beccd76b35:38103 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T04:58:46,022 DEBUG [RS:1;32beccd76b35:38103 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 32beccd76b35,38103,1733201925919 2024-12-03T04:58:46,022 DEBUG [RS:1;32beccd76b35:38103 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '32beccd76b35,38103,1733201925919' 2024-12-03T04:58:46,022 DEBUG [RS:1;32beccd76b35:38103 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T04:58:46,023 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 32beccd76b35,35531,1733201924718 2024-12-03T04:58:46,023 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@cb496b3 2024-12-03T04:58:46,023 DEBUG [RS:1;32beccd76b35:38103 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T04:58:46,023 DEBUG 
[RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T04:58:46,024 DEBUG [RS:1;32beccd76b35:38103 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T04:58:46,024 DEBUG [RS:1;32beccd76b35:38103 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T04:58:46,024 DEBUG [RS:1;32beccd76b35:38103 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 32beccd76b35,38103,1733201925919 2024-12-03T04:58:46,024 DEBUG [RS:1;32beccd76b35:38103 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '32beccd76b35,38103,1733201925919' 2024-12-03T04:58:46,024 DEBUG [RS:1;32beccd76b35:38103 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T04:58:46,025 DEBUG [RS:1;32beccd76b35:38103 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T04:58:46,026 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56564, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T04:58:46,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35531 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-03T04:58:46,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35531 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-03T04:58:46,027 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35531 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T04:58:46,028 DEBUG [RS:1;32beccd76b35:38103 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T04:58:46,029 INFO [RS:1;32beccd76b35:38103 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T04:58:46,029 INFO [RS:1;32beccd76b35:38103 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-03T04:58:46,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35531 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-03T04:58:46,030 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T04:58:46,031 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:46,031 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35531 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-12-03T04:58:46,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35531 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T04:58:46,032 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T04:58:46,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38707 is added to blk_1073741835_1011 (size=393) 2024-12-03T04:58:46,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45599 is added to blk_1073741835_1011 (size=393) 2024-12-03T04:58:46,043 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e4140b7a4d517ddb89f7b0d124e5ba92, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0 2024-12-03T04:58:46,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38707 is added to blk_1073741836_1012 (size=76) 2024-12-03T04:58:46,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45599 is added to blk_1073741836_1012 (size=76) 2024-12-03T04:58:46,057 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T04:58:46,057 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing e4140b7a4d517ddb89f7b0d124e5ba92, disabling compactions & 
flushes 2024-12-03T04:58:46,057 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92. 2024-12-03T04:58:46,057 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92. 2024-12-03T04:58:46,057 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92. after waiting 0 ms 2024-12-03T04:58:46,057 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92. 2024-12-03T04:58:46,057 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92. 2024-12-03T04:58:46,057 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for e4140b7a4d517ddb89f7b0d124e5ba92: Waiting for close lock at 1733201926057Disabling compacts and flushes for region at 1733201926057Disabling writes for close at 1733201926057Writing region close event to WAL at 1733201926057Closed at 1733201926057 2024-12-03T04:58:46,059 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T04:58:46,060 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733201926059"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733201926059"}]},"ts":"1733201926059"} 2024-12-03T04:58:46,063 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-03T04:58:46,065 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T04:58:46,065 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733201926065"}]},"ts":"1733201926065"} 2024-12-03T04:58:46,067 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-03T04:58:46,068 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e4140b7a4d517ddb89f7b0d124e5ba92, ASSIGN}] 2024-12-03T04:58:46,069 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e4140b7a4d517ddb89f7b0d124e5ba92, ASSIGN 2024-12-03T04:58:46,071 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e4140b7a4d517ddb89f7b0d124e5ba92, ASSIGN; state=OFFLINE, location=32beccd76b35,44203,1733201924824; forceNewPlan=false, retain=false 2024-12-03T04:58:46,131 INFO [RS:1;32beccd76b35:38103 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32beccd76b35%2C38103%2C1733201925919, suffix=, logDir=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919, archiveDir=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/oldWALs, maxLogs=32 2024-12-03T04:58:46,132 INFO [RS:1;32beccd76b35:38103 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C38103%2C1733201925919.1733201926132 2024-12-03T04:58:46,139 INFO [RS:1;32beccd76b35:38103 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 2024-12-03T04:58:46,140 DEBUG [RS:1;32beccd76b35:38103 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46347:46347),(127.0.0.1/127.0.0.1:44321:44321)] 2024-12-03T04:58:46,222 INFO [32beccd76b35:35531 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-03T04:58:46,222 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e4140b7a4d517ddb89f7b0d124e5ba92, regionState=OPENING, regionLocation=32beccd76b35,44203,1733201924824 2024-12-03T04:58:46,225 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e4140b7a4d517ddb89f7b0d124e5ba92, ASSIGN because future has completed 2024-12-03T04:58:46,226 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e4140b7a4d517ddb89f7b0d124e5ba92, server=32beccd76b35,44203,1733201924824}] 2024-12-03T04:58:46,384 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92. 2024-12-03T04:58:46,385 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => e4140b7a4d517ddb89f7b0d124e5ba92, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92.', STARTKEY => '', ENDKEY => ''} 2024-12-03T04:58:46,385 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath e4140b7a4d517ddb89f7b0d124e5ba92 2024-12-03T04:58:46,386 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T04:58:46,386 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for e4140b7a4d517ddb89f7b0d124e5ba92 2024-12-03T04:58:46,386 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for e4140b7a4d517ddb89f7b0d124e5ba92 2024-12-03T04:58:46,387 INFO [StoreOpener-e4140b7a4d517ddb89f7b0d124e5ba92-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e4140b7a4d517ddb89f7b0d124e5ba92 2024-12-03T04:58:46,389 INFO [StoreOpener-e4140b7a4d517ddb89f7b0d124e5ba92-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e4140b7a4d517ddb89f7b0d124e5ba92 columnFamilyName info 2024-12-03T04:58:46,389 DEBUG [StoreOpener-e4140b7a4d517ddb89f7b0d124e5ba92-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:58:46,390 INFO [StoreOpener-e4140b7a4d517ddb89f7b0d124e5ba92-1 {}] regionserver.HStore(327): Store=e4140b7a4d517ddb89f7b0d124e5ba92/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T04:58:46,390 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for e4140b7a4d517ddb89f7b0d124e5ba92 2024-12-03T04:58:46,391 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92 2024-12-03T04:58:46,391 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92 2024-12-03T04:58:46,391 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for e4140b7a4d517ddb89f7b0d124e5ba92 2024-12-03T04:58:46,392 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for e4140b7a4d517ddb89f7b0d124e5ba92 2024-12-03T04:58:46,393 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for e4140b7a4d517ddb89f7b0d124e5ba92 2024-12-03T04:58:46,396 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T04:58:46,396 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened e4140b7a4d517ddb89f7b0d124e5ba92; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=845186, jitterRate=0.07471013069152832}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T04:58:46,396 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e4140b7a4d517ddb89f7b0d124e5ba92 2024-12-03T04:58:46,397 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for e4140b7a4d517ddb89f7b0d124e5ba92: Running coprocessor pre-open hook at 1733201926386Writing region info on filesystem at 1733201926386Initializing all the Stores at 1733201926387 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733201926387Cleaning up temporary data from old regions at 1733201926392 (+5 ms)Running coprocessor post-open hooks at 1733201926396 (+4 ms)Region opened successfully at 1733201926397 (+1 ms) 2024-12-03T04:58:46,399 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92., pid=6, masterSystemTime=1733201926379 2024-12-03T04:58:46,402 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92. 2024-12-03T04:58:46,402 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92. 2024-12-03T04:58:46,403 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e4140b7a4d517ddb89f7b0d124e5ba92, regionState=OPEN, openSeqNum=2, regionLocation=32beccd76b35,44203,1733201924824 2024-12-03T04:58:46,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e4140b7a4d517ddb89f7b0d124e5ba92, server=32beccd76b35,44203,1733201924824 because future has completed 2024-12-03T04:58:46,412 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-03T04:58:46,412 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure e4140b7a4d517ddb89f7b0d124e5ba92, server=32beccd76b35,44203,1733201924824 in 182 msec 2024-12-03T04:58:46,416 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-03T04:58:46,416 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e4140b7a4d517ddb89f7b0d124e5ba92, ASSIGN in 344 msec 2024-12-03T04:58:46,418 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T04:58:46,418 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733201926418"}]},"ts":"1733201926418"} 2024-12-03T04:58:46,421 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-03T04:58:46,422 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T04:58:46,425 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 396 msec 2024-12-03T04:58:51,237 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T04:58:51,239 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:58:51,255 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:58:51,257 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:58:51,258 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:58:51,267 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-03T04:58:52,833 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-03T04:58:52,833 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-03T04:58:52,834 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-03T04:58:52,834 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-03T04:58:52,835 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T04:58:52,835 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-03T04:58:56,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35531 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T04:58:56,050 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-12-03T04:58:56,050 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-12-03T04:58:56,054 DEBUG [Time-limited test {}] 
hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-03T04:58:56,054 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92. 2024-12-03T04:58:56,077 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T04:58:56,082 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T04:58:56,083 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T04:58:56,083 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T04:58:56,083 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T04:58:56,084 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@515a142d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/hadoop.log.dir/,AVAILABLE} 2024-12-03T04:58:56,084 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2152b420{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T04:58:56,226 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76687f3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/java.io.tmpdir/jetty-localhost-33685-hadoop-hdfs-3_4_1-tests_jar-_-any-9258402526218489974/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:58:56,227 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@9ba038{HTTP/1.1, (http/1.1)}{localhost:33685} 2024-12-03T04:58:56,227 INFO [Time-limited test {}] server.Server(415): Started @120389ms 2024-12-03T04:58:56,228 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T04:58:56,276 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T04:58:56,281 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T04:58:56,289 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T04:58:56,289 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T04:58:56,290 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T04:58:56,294 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5979cbd4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/hadoop.log.dir/,AVAILABLE} 2024-12-03T04:58:56,295 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@79eaed89{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T04:58:56,347 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data5/current/BP-851166150-172.17.0.3-1733201923677/current, will proceed with Du for space computation calculation, 2024-12-03T04:58:56,354 WARN [Thread-830 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data6/current/BP-851166150-172.17.0.3-1733201923677/current, will proceed with Du for space computation calculation, 2024-12-03T04:58:56,384 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T04:58:56,387 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xae896fb8f55c9dfd with lease ID 0x9c1667867a4b1489: Processing first storage report for DS-c1f4e767-cc62-467e-883b-672977a588e4 from datanode DatanodeRegistration(127.0.0.1:40009, datanodeUuid=989e7ddf-cac0-4781-a51d-71abdbc86a85, infoPort=35411, infoSecurePort=0, ipcPort=35471, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677) 2024-12-03T04:58:56,387 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xae896fb8f55c9dfd with lease ID 0x9c1667867a4b1489: from storage DS-c1f4e767-cc62-467e-883b-672977a588e4 node DatanodeRegistration(127.0.0.1:40009, datanodeUuid=989e7ddf-cac0-4781-a51d-71abdbc86a85, infoPort=35411, infoSecurePort=0, ipcPort=35471, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T04:58:56,387 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xae896fb8f55c9dfd with lease ID 0x9c1667867a4b1489: Processing first storage report for DS-ead19bd1-0f39-4d37-a452-18d906d8fc61 from datanode DatanodeRegistration(127.0.0.1:40009, datanodeUuid=989e7ddf-cac0-4781-a51d-71abdbc86a85, infoPort=35411, infoSecurePort=0, ipcPort=35471, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677) 2024-12-03T04:58:56,387 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xae896fb8f55c9dfd with lease ID 0x9c1667867a4b1489: from storage DS-ead19bd1-0f39-4d37-a452-18d906d8fc61 node DatanodeRegistration(127.0.0.1:40009, datanodeUuid=989e7ddf-cac0-4781-a51d-71abdbc86a85, infoPort=35411, infoSecurePort=0, ipcPort=35471, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T04:58:56,448 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@67ec96b7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/java.io.tmpdir/jetty-localhost-37169-hadoop-hdfs-3_4_1-tests_jar-_-any-13030090919457709449/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:58:56,448 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@664e0b4e{HTTP/1.1, (http/1.1)}{localhost:37169} 2024-12-03T04:58:56,448 INFO [Time-limited test {}] server.Server(415): Started @120611ms 2024-12-03T04:58:56,450 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T04:58:56,542 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T04:58:56,555 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T04:58:56,577 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T04:58:56,578 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T04:58:56,578 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T04:58:56,580 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55fcfbe4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/hadoop.log.dir/,AVAILABLE} 2024-12-03T04:58:56,580 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66098333{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T04:58:56,622 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data7/current/BP-851166150-172.17.0.3-1733201923677/current, will proceed with Du for space computation calculation, 2024-12-03T04:58:56,625 WARN [Thread-865 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data8/current/BP-851166150-172.17.0.3-1733201923677/current, will proceed with Du for space computation calculation, 2024-12-03T04:58:56,681 WARN [Thread-844 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T04:58:56,684 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2446651ca32bb473 with lease ID 0x9c1667867a4b148a: Processing first storage report for DS-cb18d10d-2d52-4122-8d00-16b74e559ba3 from datanode DatanodeRegistration(127.0.0.1:35215, datanodeUuid=0b88c979-8655-42f1-8356-0f1c738e6cf7, infoPort=40501, infoSecurePort=0, ipcPort=45499, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677) 2024-12-03T04:58:56,684 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2446651ca32bb473 with lease ID 0x9c1667867a4b148a: from storage DS-cb18d10d-2d52-4122-8d00-16b74e559ba3 node DatanodeRegistration(127.0.0.1:35215, datanodeUuid=0b88c979-8655-42f1-8356-0f1c738e6cf7, infoPort=40501, infoSecurePort=0, ipcPort=45499, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T04:58:56,684 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2446651ca32bb473 with lease ID 0x9c1667867a4b148a: Processing first storage report for DS-4aefd67e-a98a-4969-8e7b-e53f951353e1 from datanode DatanodeRegistration(127.0.0.1:35215, datanodeUuid=0b88c979-8655-42f1-8356-0f1c738e6cf7, infoPort=40501, infoSecurePort=0, ipcPort=45499, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677) 2024-12-03T04:58:56,684 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2446651ca32bb473 with lease ID 0x9c1667867a4b148a: from storage DS-4aefd67e-a98a-4969-8e7b-e53f951353e1 node DatanodeRegistration(127.0.0.1:35215, datanodeUuid=0b88c979-8655-42f1-8356-0f1c738e6cf7, infoPort=40501, infoSecurePort=0, ipcPort=45499, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T04:58:56,745 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3d8f7a8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/java.io.tmpdir/jetty-localhost-39977-hadoop-hdfs-3_4_1-tests_jar-_-any-7014530881800169308/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:58:56,746 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@543df762{HTTP/1.1, (http/1.1)}{localhost:39977} 2024-12-03T04:58:56,746 INFO [Time-limited test {}] server.Server(415): Started @120908ms 2024-12-03T04:58:56,747 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-03T04:58:56,845 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data9/current/BP-851166150-172.17.0.3-1733201923677/current, will proceed with Du for space computation calculation, 2024-12-03T04:58:56,845 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data10/current/BP-851166150-172.17.0.3-1733201923677/current, will proceed with Du for space computation calculation, 2024-12-03T04:58:56,867 WARN [Thread-879 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T04:58:56,870 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6f4d9df09bf9774f with lease ID 0x9c1667867a4b148b: Processing first storage report for DS-dcbe587d-f673-4048-a95b-9eff4dee3851 from datanode DatanodeRegistration(127.0.0.1:33573, datanodeUuid=844ee8f8-d77f-407f-8243-8c67f287e0de, infoPort=35549, infoSecurePort=0, ipcPort=43949, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677) 2024-12-03T04:58:56,870 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6f4d9df09bf9774f with lease ID 0x9c1667867a4b148b: from storage DS-dcbe587d-f673-4048-a95b-9eff4dee3851 node DatanodeRegistration(127.0.0.1:33573, datanodeUuid=844ee8f8-d77f-407f-8243-8c67f287e0de, infoPort=35549, infoSecurePort=0, ipcPort=43949, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T04:58:56,870 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6f4d9df09bf9774f with lease ID 0x9c1667867a4b148b: Processing first storage report for DS-171c3f56-9e9d-42b3-9711-26e452abeea5 from datanode DatanodeRegistration(127.0.0.1:33573, datanodeUuid=844ee8f8-d77f-407f-8243-8c67f287e0de, infoPort=35549, infoSecurePort=0, ipcPort=43949, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677) 2024-12-03T04:58:56,870 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6f4d9df09bf9774f with lease ID 0x9c1667867a4b148b: from storage DS-171c3f56-9e9d-42b3-9711-26e452abeea5 node DatanodeRegistration(127.0.0.1:33573, datanodeUuid=844ee8f8-d77f-407f-8243-8c67f287e0de, infoPort=35549, infoSecurePort=0, ipcPort=43949, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T04:58:56,970 WARN [ResponseProcessor for block BP-851166150-172.17.0.3-1733201923677:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-851166150-172.17.0.3-1733201923677:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:58:56,971 WARN [ResponseProcessor for block BP-851166150-172.17.0.3-1733201923677:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-851166150-172.17.0.3-1733201923677:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:58:56,971 WARN [DataStreamer for file /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 block BP-851166150-172.17.0.3-1733201923677:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK], DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK]) is bad. 2024-12-03T04:58:56,971 WARN [DataStreamer for file /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta block BP-851166150-172.17.0.3-1733201923677:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK], DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK]) is bad. 2024-12-03T04:58:56,971 WARN [ResponseProcessor for block BP-851166150-172.17.0.3-1733201923677:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-851166150-172.17.0.3-1733201923677:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-851166150-172.17.0.3-1733201923677:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:58:56,972 WARN [DataStreamer for file /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/WALs/32beccd76b35,35531,1733201924718/32beccd76b35%2C35531%2C1733201924718.1733201924973 block BP-851166150-172.17.0.3-1733201923677:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK], DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK]) is bad. 
2024-12-03T04:58:56,972 WARN [ResponseProcessor for block BP-851166150-172.17.0.3-1733201923677:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-851166150-172.17.0.3-1733201923677:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-851166150-172.17.0.3-1733201923677:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:58:56,973 WARN [DataStreamer for file /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201925416 block BP-851166150-172.17.0.3-1733201923677:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK], DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK]) is bad. 2024-12-03T04:58:56,973 WARN [PacketResponder: BP-851166150-172.17.0.3-1733201923677:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:38707] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:58:56,973 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1028590899_22 at /127.0.0.1:35484 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:45599:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35484 dst: /127.0.0.1:45599 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:58:56,974 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7748d0b0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:58:56,974 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@29fc42c9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T04:58:56,974 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T04:58:56,975 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47d3f760{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T04:58:56,975 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5aaecb38{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/hadoop.log.dir/,STOPPED} 2024-12-03T04:58:56,975 WARN [PacketResponder: BP-851166150-172.17.0.3-1733201923677:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:38707] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] 
at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:58:56,975 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46807595_22 at /127.0.0.1:35538 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:45599:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35538 dst: /127.0.0.1:45599 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:58:56,976 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:52728 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38707:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52728 dst: /127.0.0.1:38707 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[closed]. Total timeout mills is 60000, 49422 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:58:56,975 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-46807595_22 at /127.0.0.1:52758 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:38707:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52758 dst: /127.0.0.1:38707 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:58:56,977 WARN [BP-851166150-172.17.0.3-1733201923677 heartbeating to localhost/127.0.0.1:38443 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T04:58:56,975 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:52734 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38707:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52734 dst: /127.0.0.1:38707 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:58:56,975 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:35510 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:45599:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35510 dst: /127.0.0.1:45599 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:58:56,977 WARN [BP-851166150-172.17.0.3-1733201923677 heartbeating to localhost/127.0.0.1:38443 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-851166150-172.17.0.3-1733201923677 (Datanode Uuid b8f66bf4-78e5-4a33-b801-cf0b5bb96189) service to localhost/127.0.0.1:38443 2024-12-03T04:58:56,975 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1028590899_22 at /127.0.0.1:52694 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38707:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52694 dst: /127.0.0.1:38707 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:58:56,976 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:35508 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45599:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35508 dst: /127.0.0.1:45599 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:58:56,977 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T04:58:56,977 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T04:58:56,978 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data3/current/BP-851166150-172.17.0.3-1733201923677 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:58:56,978 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data4/current/BP-851166150-172.17.0.3-1733201923677 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:58:56,981 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T04:58:56,982 WARN [DataStreamer for file /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/WALs/32beccd76b35,35531,1733201924718/32beccd76b35%2C35531%2C1733201924718.1733201924973 block BP-851166150-172.17.0.3-1733201923677:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:58:56,983 WARN [DataStreamer for file /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 block BP-851166150-172.17.0.3-1733201923677:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:58:56,983 WARN [DataStreamer for file /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta block BP-851166150-172.17.0.3-1733201923677:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:58:56,983 WARN [DataStreamer for file /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201925416 block BP-851166150-172.17.0.3-1733201923677:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:58:56,986 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@36912d0f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:58:56,986 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@253e01a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T04:58:56,986 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T04:58:56,986 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6277b6b4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T04:58:56,986 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@795b72e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/hadoop.log.dir/,STOPPED} 2024-12-03T04:58:56,988 WARN [BP-851166150-172.17.0.3-1733201923677 heartbeating to localhost/127.0.0.1:38443 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T04:58:56,988 WARN [BP-851166150-172.17.0.3-1733201923677 heartbeating to localhost/127.0.0.1:38443 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-851166150-172.17.0.3-1733201923677 (Datanode Uuid 65c66e99-3abb-4257-b6c4-a79e2df13ead) service to localhost/127.0.0.1:38443 2024-12-03T04:58:56,988 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T04:58:56,988 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T04:58:56,988 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data1/current/BP-851166150-172.17.0.3-1733201923677 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:58:56,989 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data2/current/BP-851166150-172.17.0.3-1733201923677 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:58:56,989 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T04:58:56,993 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92., hostname=32beccd76b35,44203,1733201924824, seqNum=2] 2024-12-03T04:58:56,995 ERROR [FSHLog-0-hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0-prefix:32beccd76b35,44203,1733201924824 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:58:56,995 WARN [FSHLog-0-hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0-prefix:32beccd76b35,44203,1733201924824 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T04:58:56,995 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:58:56,995 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 32beccd76b35%2C44203%2C1733201924824:(num 1733201925416) roll requested 2024-12-03T04:58:56,996 INFO [regionserver/32beccd76b35:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C44203%2C1733201924824.1733201936995 2024-12-03T04:58:56,998 WARN [Thread-901 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:58:56,999 WARN [Thread-901 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK], DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK]) is bad. 
2024-12-03T04:58:56,999 WARN [Thread-901 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741838_1018 2024-12-03T04:58:57,001 WARN [Thread-901 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK] 2024-12-03T04:58:57,007 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:57,007 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:57,008 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:57,008 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:57,008 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:58:57,008 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201925416 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201936995 2024-12-03T04:58:57,008 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:58:57,009 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
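The Thread-901 entries above show the client-side pattern for a failed block write: the unreachable first datanode is identified, the partially written block is abandoned, the node is added to an exclude list, and a new block is requested from the namenode. A simplified sketch of that exclude-and-retry loop follows; the interfaces are illustrative stand-ins, the real logic lives in org.apache.hadoop.hdfs.DataStreamer.

    import java.io.IOException;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    /** Illustrative exclude-and-retry loop for setting up a block write pipeline. */
    class PipelineSetup {
        interface NameNodeClient {
            /** Allocate a new block, avoiding the excluded datanodes. */
            List<String> allocateBlock(Set<String> excluded) throws IOException;
            void abandonBlock(List<String> pipeline) throws IOException;
        }
        interface DataNodeClient {
            /** Open the pipeline; throws if the first datanode is unreachable. */
            void connect(List<String> pipeline) throws IOException;
        }

        List<String> setup(NameNodeClient nn, DataNodeClient dn, int maxAttempts) throws IOException {
            Set<String> excluded = new HashSet<>();
            for (int attempt = 0; attempt < maxAttempts; attempt++) {
                List<String> pipeline = nn.allocateBlock(excluded);
                try {
                    dn.connect(pipeline);
                    return pipeline;                    // pipeline established
                } catch (IOException e) {
                    // Same sequence as in the log: abandon the block, exclude the
                    // datanode that failed (here, the first one), then retry.
                    nn.abandonBlock(pipeline);
                    excluded.add(pipeline.get(0));
                }
            }
            throw new IOException("could not build a pipeline; excluded=" + excluded);
        }
    }

As more datanodes are stopped during the test, this exclude list grows until no usable target is left, which is what eventually produces the repeated "All datanodes [...] are bad" failures.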
2024-12-03T04:58:57,009 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-03T04:58:57,010 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-03T04:58:57,010 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201925416 2024-12-03T04:58:57,011 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35411:35411),(127.0.0.1/127.0.0.1:35549:35549)] 2024-12-03T04:58:57,011 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201925416 is not closed yet, will try archiving it next time 2024-12-03T04:58:57,012 WARN [IPC Server handler 4 on default port 38443 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201925416 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741833_1009 2024-12-03T04:58:57,016 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201925416 after 4ms 2024-12-03T04:58:57,979 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:58:58,940 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
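RecoverLeaseFSUtils above fails its first recovery attempt (attempt=0 after 4ms) because the namenode reports that lease recovery on the old WAL file is still in progress, and it retries later. The underlying HDFS call it wraps is DistributedFileSystem.recoverLease(Path), which returns true once the file is closed; a minimal retry loop around that call looks roughly like this (the timeout and backoff values are illustrative, not taken from HBase's configuration).

    import java.io.IOException;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    /** Illustrative: poll recoverLease until the old WAL file is closed or we give up. */
    final class LeaseRecovery {
        static boolean recover(DistributedFileSystem dfs, Path file, long timeoutMs)
                throws IOException, InterruptedException {
            long deadline = System.currentTimeMillis() + timeoutMs;
            int attempt = 0;
            while (System.currentTimeMillis() < deadline) {
                // recoverLease returns true once the lease is released and the file is
                // closed; false means recovery is still in progress on the namenode.
                if (dfs.recoverLease(file)) {
                    return true;
                }
                // Back off between attempts; the log shows attempt=0 after 4ms and the
                // next attempt roughly four seconds later.
                Thread.sleep(Math.min(4000L, 100L * (1L << Math.min(attempt, 5))));
                attempt++;
            }
            return false;
        }
    }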
2024-12-03T04:58:59,011 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:58:59,013 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201936995 2024-12-03T04:58:59,014 WARN [ResponseProcessor for block BP-851166150-172.17.0.3-1733201923677:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-851166150-172.17.0.3-1733201923677:blk_1073741839_1019 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:58:59,014 WARN [DataStreamer for file /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201936995 block BP-851166150-172.17.0.3-1733201923677:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK], DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]) is bad. 2024-12-03T04:58:59,015 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:57822 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:40009:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57822 dst: /127.0.0.1:40009 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:58:59,015 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:47088 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:33573:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47088 dst: /127.0.0.1:33573 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T04:58:59,017 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76687f3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:58:59,018 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@9ba038{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T04:58:59,018 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T04:58:59,018 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2152b420{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T04:58:59,018 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@515a142d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/hadoop.log.dir/,STOPPED} 2024-12-03T04:58:59,023 WARN [BP-851166150-172.17.0.3-1733201923677 heartbeating to localhost/127.0.0.1:38443 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T04:58:59,023 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-03T04:58:59,023 WARN [BP-851166150-172.17.0.3-1733201923677 heartbeating to localhost/127.0.0.1:38443 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-851166150-172.17.0.3-1733201923677 (Datanode Uuid 989e7ddf-cac0-4781-a51d-71abdbc86a85) service to localhost/127.0.0.1:38443 2024-12-03T04:58:59,023 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T04:58:59,024 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data5/current/BP-851166150-172.17.0.3-1733201923677 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:58:59,024 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data6/current/BP-851166150-172.17.0.3-1733201923677 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:58:59,024 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T04:58:59,979 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:00,940 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:01,012 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:01,012 WARN [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]] 2024-12-03T04:59:01,013 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 32beccd76b35%2C44203%2C1733201924824:(num 1733201936995) roll requested 2024-12-03T04:59:01,013 INFO [regionserver/32beccd76b35:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C44203%2C1733201924824.1733201941013 2024-12-03T04:59:01,018 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201925416 after 4008ms 2024-12-03T04:59:01,020 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:01,021 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK], DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]) is bad. 2024-12-03T04:59:01,021 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741840_1022 2024-12-03T04:59:01,022 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK] 2024-12-03T04:59:01,023 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:01,023 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK], DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK]) is bad. 2024-12-03T04:59:01,024 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741841_1023 2024-12-03T04:59:01,024 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK] 2024-12-03T04:59:01,027 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45599 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:01,027 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:45216 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741842_1024] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data8]'}, localName='127.0.0.1:35215', datanodeUuid='0b88c979-8655-42f1-8356-0f1c738e6cf7', xmitsInProgress=0}:Exception transferring block BP-851166150-172.17.0.3-1733201923677:blk_1073741842_1024 to mirror 127.0.0.1:45599 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:01,027 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK], DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]) is bad. 2024-12-03T04:59:01,027 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741842_1024 2024-12-03T04:59:01,027 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:45216 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741842_1024] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-03T04:59:01,027 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:45216 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:35215:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45216 dst: /127.0.0.1:35215 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:01,028 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK] 2024-12-03T04:59:01,030 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T04:59:01,034 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:01,035 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:01,035 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:01,035 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:01,035 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:01,035 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201936995 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201941013 2024-12-03T04:59:01,036 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35549:35549),(127.0.0.1/127.0.0.1:40501:40501)] 2024-12-03T04:59:01,037 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201925416 is not closed yet, will try archiving it next time 2024-12-03T04:59:01,037 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201936995 is not closed yet, will try archiving it next time 2024-12-03T04:59:01,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33573 is added to blk_1073741839_1021 (size=3600) 2024-12-03T04:59:01,438 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201925416 is not closed yet, will try archiving it next time 2024-12-03T04:59:01,979 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes 
[DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:02,878 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@33867007[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33573, datanodeUuid=844ee8f8-d77f-407f-8243-8c67f287e0de, infoPort=35549, infoSecurePort=0, ipcPort=43949, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677):Failed to transfer BP-851166150-172.17.0.3-1733201923677:blk_1073741839_1021 to 127.0.0.1:45599 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:02,940 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:03,034 WARN [ResponseProcessor for block BP-851166150-172.17.0.3-1733201923677:blk_1073741843_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-851166150-172.17.0.3-1733201923677:blk_1073741843_1025 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T04:59:03,034 WARN [DataStreamer for file /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201941013 block BP-851166150-172.17.0.3-1733201923677:blk_1073741843_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK], DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]) is bad. 2024-12-03T04:59:03,037 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:45218 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:35215:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45218 dst: /127.0.0.1:35215 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:35215 remote=/127.0.0.1:45218]. Total timeout mills is 60000, 57997 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T04:59:03,037 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:47112 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:33573:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47112 dst: /127.0.0.1:33573 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:03,037 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:03,037 WARN [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK]] 2024-12-03T04:59:03,037 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 32beccd76b35%2C44203%2C1733201924824:(num 1733201941013) roll requested 2024-12-03T04:59:03,038 INFO [regionserver/32beccd76b35:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C44203%2C1733201924824.1733201943037 2024-12-03T04:59:03,051 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40009 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:03,051 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:57924 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741844_1027] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data8]'}, localName='127.0.0.1:35215', datanodeUuid='0b88c979-8655-42f1-8356-0f1c738e6cf7', xmitsInProgress=0}:Exception transferring block BP-851166150-172.17.0.3-1733201923677:blk_1073741844_1027 to mirror 127.0.0.1:40009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
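The FSHLog warning above ("Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL.") is a proactive check rather than a hard failure: after a sync, the writer inspects the current output pipeline and, if the replica count has dropped below the expected minimum, requests a roll so the next WAL file is created on a healthier pipeline. A hedged sketch of that check follows; the interface and threshold handling are illustrative, not HBase's actual code.

    /** Illustrative post-sync check: request a roll when the pipeline is degraded. */
    class LowReplicationCheck {
        interface Wal {
            /** Number of datanodes currently in the active WAL file's pipeline. */
            int currentPipelineSize();
            void requestRoll();
        }

        private final int minReplicas;   // e.g. 2 for the test cluster in this log

        LowReplicationCheck(int minReplicas) { this.minReplicas = minReplicas; }

        void afterSync(Wal wal) {
            if (wal.currentPipelineSize() < minReplicas) {
                // Matches the warning above: "Found 1 replicas but expecting no less
                // than 2 replicas. Requesting close of WAL."
                wal.requestRoll();
            }
        }
    }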
2024-12-03T04:59:03,051 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3d8f7a8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:59:03,051 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK], DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]) is bad. 2024-12-03T04:59:03,051 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741844_1027 2024-12-03T04:59:03,051 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:57924 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741844_1027] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-03T04:59:03,051 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:57924 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741844_1027] {}] datanode.DataXceiver(331): 127.0.0.1:35215:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57924 dst: /127.0.0.1:35215 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T04:59:03,052 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@543df762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T04:59:03,052 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T04:59:03,052 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66098333{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T04:59:03,052 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK] 2024-12-03T04:59:03,052 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55fcfbe4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/hadoop.log.dir/,STOPPED} 2024-12-03T04:59:03,054 WARN [BP-851166150-172.17.0.3-1733201923677 heartbeating to localhost/127.0.0.1:38443 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T04:59:03,054 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-03T04:59:03,054 WARN [BP-851166150-172.17.0.3-1733201923677 heartbeating to localhost/127.0.0.1:38443 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-851166150-172.17.0.3-1733201923677 (Datanode Uuid 844ee8f8-d77f-407f-8243-8c67f287e0de) service to localhost/127.0.0.1:38443 2024-12-03T04:59:03,054 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T04:59:03,055 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T04:59:03,055 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data9/current/BP-851166150-172.17.0.3-1733201923677 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:59:03,055 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK], DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]) is bad. 2024-12-03T04:59:03,055 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741845_1028 2024-12-03T04:59:03,055 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data10/current/BP-851166150-172.17.0.3-1733201923677 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:59:03,055 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T04:59:03,056 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK] 2024-12-03T04:59:03,058 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:03,058 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK], DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]) is bad. 
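The Jetty "Stopped ..." lines and the "Ending block pool service" warnings above are the footprint of the test deliberately shutting datanodes down, which is what keeps shrinking the set of usable pipeline targets. In a MiniDFSCluster-based test that is typically a single call; the sketch below is an assumption about how such a shutdown is driven, not something read from this log.

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    /** Illustrative: simulate datanode death in a MiniDFSCluster-based test. */
    class DatanodeKiller {
        static void killOne(MiniDFSCluster cluster) {
            // Stopping a datanode tears down its block pool service and web context,
            // producing shutdown lines like the ones above.
            cluster.stopDataNode(0);
        }
    }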
2024-12-03T04:59:03,058 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741846_1029 2024-12-03T04:59:03,059 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK] 2024-12-03T04:59:03,060 WARN [Thread-922 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:03,060 WARN [Thread-922 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK], DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK]) is bad. 2024-12-03T04:59:03,060 WARN [Thread-922 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741847_1030 2024-12-03T04:59:03,061 WARN [Thread-922 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK] 2024-12-03T04:59:03,062 WARN [IPC Server handler 2 on default port 38443 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T04:59:03,062 WARN [IPC Server handler 2 on default port 38443 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T04:59:03,062 WARN [IPC Server handler 2 on default port 38443 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T04:59:03,066 INFO [sync.0 
{}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:03,066 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:03,066 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:03,066 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:03,066 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:03,067 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201941013 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201943037 2024-12-03T04:59:03,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741843_1026 (size=2431) 2024-12-03T04:59:03,070 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201925416 is not closed yet, will try archiving it next time 2024-12-03T04:59:03,086 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40501:40501)] 2024-12-03T04:59:03,086 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201925416 is not closed yet, will try archiving it next time 2024-12-03T04:59:03,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44203 {}] regionserver.HRegion(8855): Flush requested on e4140b7a4d517ddb89f7b0d124e5ba92 2024-12-03T04:59:03,090 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e4140b7a4d517ddb89f7b0d124e5ba92 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T04:59:03,114 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/.tmp/info/0543a8c3813c40d68f1c5c77f4726226 is 1080, key is row0002/info:/1733201939026/Put/seqid=0 2024-12-03T04:59:03,116 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
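The BlockPlacementPolicyDefault warnings a little earlier ("still in need of 1 to reach 2", "only 0 storage types can be selected") follow directly from the arithmetic of this cluster state: the file asks for replication 2, but with most datanodes stopped or excluded there are not enough distinct live DISK storages left to place every replica. A tiny illustration of that shortfall is below; the target of 2 matches the log, the usable-node count is an illustrative assumption.

    /** Illustrative: why placement reports "still in need of 1 to reach 2". */
    class PlacementShortfall {
        static int shortfall(int requiredReplicas, int usableDatanodes) {
            return Math.max(0, requiredReplicas - Math.max(0, usableDatanodes));
        }

        public static void main(String[] args) {
            // Replication target is 2; with only one usable datanode remaining,
            // one replica cannot be placed.
            System.out.println(shortfall(2, 1));   // prints 1, matching the warning
        }
    }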
2024-12-03T04:59:03,116 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK], DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]) is bad. 2024-12-03T04:59:03,116 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741849_1032 2024-12-03T04:59:03,117 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK] 2024-12-03T04:59:03,118 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:03,118 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK], DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK]) is bad. 2024-12-03T04:59:03,118 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741850_1033 2024-12-03T04:59:03,119 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK] 2024-12-03T04:59:03,120 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:03,121 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK], DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]) is bad. 2024-12-03T04:59:03,121 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741851_1034 2024-12-03T04:59:03,121 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK] 2024-12-03T04:59:03,122 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:03,123 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK], DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]) is bad. 
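The run of DataStreamer warnings above is the HDFS client cycling through pipeline candidates after the test has killed datanodes: each attempt ends in a ConnectException, the block is abandoned, the unreachable datanode is excluded, and a fresh block is requested. The following is a minimal, hypothetical Python sketch (not part of the test output; the file name is a placeholder) that tallies those events, relying only on the "Abandoning ..." and "Excluding datanode ..." wordings shown above:

import re
from collections import Counter

# Message formats copied from the DataStreamer warnings in this log.
EXCLUDE_RE = re.compile(r"Excluding datanode DatanodeInfoWithStorage\[([^,\]]+)")
ABANDON_RE = re.compile(r"Abandoning BP-[\w.-]+:(blk_\d+_\d+)")

def summarize(log_text):
    excluded = Counter(EXCLUDE_RE.findall(log_text))
    abandoned = ABANDON_RE.findall(log_text)
    print("abandoned block attempts:", len(abandoned))
    for addr, count in excluded.most_common():
        print("excluded", addr, count, "time(s)")

if __name__ == "__main__":
    # "test-log.txt" is a placeholder path for a saved copy of this console log.
    with open("test-log.txt", encoding="utf-8") as fh:
        summarize(fh.read())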
2024-12-03T04:59:03,123 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741852_1035 2024-12-03T04:59:03,123 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK] 2024-12-03T04:59:03,124 WARN [IPC Server handler 0 on default port 38443 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T04:59:03,124 WARN [IPC Server handler 0 on default port 38443 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T04:59:03,124 WARN [IPC Server handler 0 on default port 38443 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T04:59:03,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741853_1036 (size=10347) 2024-12-03T04:59:03,534 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/.tmp/info/0543a8c3813c40d68f1c5c77f4726226 2024-12-03T04:59:03,542 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/.tmp/info/0543a8c3813c40d68f1c5c77f4726226 as hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/0543a8c3813c40d68f1c5c77f4726226 2024-12-03T04:59:03,550 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/0543a8c3813c40d68f1c5c77f4726226, entries=5, sequenceid=11, filesize=10.1 K 2024-12-03T04:59:03,551 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for e4140b7a4d517ddb89f7b0d124e5ba92 in 462ms, sequenceid=11, compaction requested=false 2024-12-03T04:59:03,551 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
e4140b7a4d517ddb89f7b0d124e5ba92: 2024-12-03T04:59:03,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44203 {}] regionserver.HRegion(8855): Flush requested on e4140b7a4d517ddb89f7b0d124e5ba92 2024-12-03T04:59:03,717 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e4140b7a4d517ddb89f7b0d124e5ba92 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-03T04:59:03,722 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/.tmp/info/5e30a664d55a480ca6cd3c26f907ad1b is 1080, key is row0007/info:/1733201943091/Put/seqid=0 2024-12-03T04:59:03,725 WARN [Thread-932 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38707 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:03,725 WARN [Thread-932 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK], DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK]) is bad. 2024-12-03T04:59:03,725 WARN [Thread-932 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741854_1037 2024-12-03T04:59:03,725 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:57962 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741854_1037] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data8]'}, localName='127.0.0.1:35215', datanodeUuid='0b88c979-8655-42f1-8356-0f1c738e6cf7', xmitsInProgress=0}:Exception transferring block BP-851166150-172.17.0.3-1733201923677:blk_1073741854_1037 to mirror 127.0.0.1:38707 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:03,726 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:57962 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741854_1037] {}] datanode.BlockReceiver(316): Block 1073741854 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T04:59:03,726 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:57962 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741854_1037] {}] datanode.DataXceiver(331): 127.0.0.1:35215:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57962 dst: /127.0.0.1:35215 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:03,726 WARN [Thread-932 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK] 2024-12-03T04:59:03,729 WARN [Thread-932 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40009 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
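Two failure shapes alternate in the createBlockOutputStream warnings above: either the client itself gets java.net.ConnectException when dialing the first datanode, or the first datanode (127.0.0.1:35215 here) is reachable but cannot forward to its mirror, so the client sees an IOException with status=ERROR and "ack with firstBadLink as <addr>" while the datanode logs the matching DataXceiver error. A small, hypothetical Python sketch that separates the two cases from a saved log, assuming only the phrasings visible above:

import re

# Direct client-side refusal vs. ERROR ack naming the mirror the first datanode
# could not reach ("ack with firstBadLink as <addr>").
EVENT_RE = re.compile(
    r"Exception in createBlockOutputStream (blk_\d+_\d+) "
    r"(java\.net\.ConnectException|java\.io\.IOException: Got error.*?firstBadLink as (\S+))"
)

def classify(log_text):
    direct, via_mirror = [], []
    for m in EVENT_RE.finditer(log_text):
        if m.group(2).startswith("java.net.ConnectException"):
            direct.append(m.group(1))                     # client -> first datanode refused
        else:
            via_mirror.append((m.group(1), m.group(3)))   # first datanode -> mirror refused
    return direct, via_mirror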
2024-12-03T04:59:03,729 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:57964 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data8]'}, localName='127.0.0.1:35215', datanodeUuid='0b88c979-8655-42f1-8356-0f1c738e6cf7', xmitsInProgress=0}:Exception transferring block BP-851166150-172.17.0.3-1733201923677:blk_1073741855_1038 to mirror 127.0.0.1:40009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:03,729 WARN [Thread-932 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK], DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]) is bad. 2024-12-03T04:59:03,729 WARN [Thread-932 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741855_1038 2024-12-03T04:59:03,729 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:57964 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T04:59:03,729 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:57964 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:35215:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57964 dst: /127.0.0.1:35215 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:03,730 WARN [Thread-932 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK] 2024-12-03T04:59:03,731 WARN [Thread-932 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:03,731 WARN [Thread-932 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK], DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]) is bad. 2024-12-03T04:59:03,731 WARN [Thread-932 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741856_1039 2024-12-03T04:59:03,731 WARN [Thread-932 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK] 2024-12-03T04:59:03,733 WARN [Thread-932 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:03,733 WARN [Thread-932 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK], DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]) is bad. 2024-12-03T04:59:03,733 WARN [Thread-932 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741857_1040 2024-12-03T04:59:03,733 WARN [Thread-932 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK] 2024-12-03T04:59:03,734 WARN [IPC Server handler 1 on default port 38443 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T04:59:03,734 WARN [IPC Server handler 1 on default port 38443 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T04:59:03,734 WARN [IPC Server handler 1 on default port 38443 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T04:59:03,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741858_1041 (size=12506) 2024-12-03T04:59:03,980 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
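The BlockPlacementPolicyDefault warnings above repeat for every new block because the NameNode can no longer find a second DISK target: with only one live datanode left, each allocation is "still in need of 1 to reach 2", first with no unavailable storages and then with DISK itself marked unavailable. A minimal, hypothetical Python helper (not part of the test output) that extracts the shortfall from those warnings:

import re

# Wording taken from the "Failed to place enough replicas" warnings in this log.
PLACEMENT_RE = re.compile(r"still in need of (\d+) to reach (\d+)")

def shortfalls(log_text):
    """Yield (missing, target) for each block placement warning in the text."""
    for missing, target in PLACEMENT_RE.findall(log_text):
        yield int(missing), int(target)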
2024-12-03T04:59:04,140 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/.tmp/info/5e30a664d55a480ca6cd3c26f907ad1b 2024-12-03T04:59:04,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/.tmp/info/5e30a664d55a480ca6cd3c26f907ad1b as hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/5e30a664d55a480ca6cd3c26f907ad1b 2024-12-03T04:59:04,163 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/5e30a664d55a480ca6cd3c26f907ad1b, entries=7, sequenceid=24, filesize=12.2 K 2024-12-03T04:59:04,164 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for e4140b7a4d517ddb89f7b0d124e5ba92 in 447ms, sequenceid=24, compaction requested=false 2024-12-03T04:59:04,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e4140b7a4d517ddb89f7b0d124e5ba92: 2024-12-03T04:59:04,164 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-12-03T04:59:04,165 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T04:59:04,165 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/5e30a664d55a480ca6cd3c26f907ad1b because midkey is the same as first or last row 2024-12-03T04:59:04,941 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:05,086 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:05,086 WARN [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK]] 2024-12-03T04:59:05,086 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 32beccd76b35%2C44203%2C1733201924824:(num 1733201943037) roll requested 2024-12-03T04:59:05,087 INFO [regionserver/32beccd76b35:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C44203%2C1733201924824.1733201945087 2024-12-03T04:59:05,091 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:05,091 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK], DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK]) is bad. 2024-12-03T04:59:05,091 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741859_1042 2024-12-03T04:59:05,092 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK] 2024-12-03T04:59:05,093 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:05,093 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK], DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]) is bad. 2024-12-03T04:59:05,093 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741860_1043 2024-12-03T04:59:05,094 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK] 2024-12-03T04:59:05,097 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45599 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:05,097 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:57980 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data8]'}, localName='127.0.0.1:35215', datanodeUuid='0b88c979-8655-42f1-8356-0f1c738e6cf7', xmitsInProgress=0}:Exception transferring block BP-851166150-172.17.0.3-1733201923677:blk_1073741861_1044 to mirror 127.0.0.1:45599 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:05,097 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK], DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]) is bad. 2024-12-03T04:59:05,097 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741861_1044 2024-12-03T04:59:05,097 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:57980 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-03T04:59:05,097 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:57980 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:35215:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57980 dst: /127.0.0.1:35215 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:05,098 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK] 2024-12-03T04:59:05,099 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:05,099 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK], DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]) is bad. 2024-12-03T04:59:05,099 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741862_1045 2024-12-03T04:59:05,100 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK] 2024-12-03T04:59:05,100 WARN [IPC Server handler 4 on default port 38443 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T04:59:05,100 WARN [IPC Server handler 4 on default port 38443 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T04:59:05,100 WARN [IPC Server handler 4 on default port 38443 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T04:59:05,104 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:05,104 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:05,104 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:05,104 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:05,104 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:05,105 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201943037 with entries=22, filesize=21.95 KB; new WAL /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201945087 2024-12-03T04:59:05,107 DEBUG 
[regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40501:40501)] 2024-12-03T04:59:05,107 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201925416 is not closed yet, will try archiving it next time 2024-12-03T04:59:05,107 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201943037 is not closed yet, will try archiving it next time 2024-12-03T04:59:05,107 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201936995 to hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/oldWALs/32beccd76b35%2C44203%2C1733201924824.1733201936995 2024-12-03T04:59:05,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741848_1031 (size=22485) 2024-12-03T04:59:05,109 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201941013 to hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/oldWALs/32beccd76b35%2C44203%2C1733201924824.1733201941013 2024-12-03T04:59:05,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44203 {}] regionserver.HRegion(8855): Flush requested on e4140b7a4d517ddb89f7b0d124e5ba92 2024-12-03T04:59:05,143 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e4140b7a4d517ddb89f7b0d124e5ba92 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-03T04:59:05,149 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/.tmp/info/69085cc5be794f8280c4717e2da66513 is 1079, key is tmprow/info:/1733201945142/Put/seqid=0 2024-12-03T04:59:05,151 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
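Around this point the log roller has reacted to the short pipeline (one replica where at least two are expected) by rolling the WAL onto the surviving datanode and archiving older, fully closed WAL files to oldWALs; each roll is summarized by an AbstractFSWAL "Rolled WAL ... with entries=..., filesize=...; new WAL ..." line. A small, hypothetical Python sketch that collects those roll summaries from a saved copy of the log, assuming only that wording:

import re

# Format copied from the AbstractFSWAL(987) "Rolled WAL" lines in this log.
ROLL_RE = re.compile(
    r"Rolled WAL (\S+) with entries=(\d+), filesize=([\d.]+ [KMG]?B); new WAL (\S+)"
)

def wal_rolls(log_text):
    for old, entries, size, new in ROLL_RE.findall(log_text):
        yield {"old": old, "entries": int(entries), "size": size, "new": new}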
2024-12-03T04:59:05,151 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK], DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]) is bad. 2024-12-03T04:59:05,151 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741864_1047 2024-12-03T04:59:05,152 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK] 2024-12-03T04:59:05,153 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:05,153 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK], DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]) is bad. 2024-12-03T04:59:05,153 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741865_1048 2024-12-03T04:59:05,154 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK] 2024-12-03T04:59:05,157 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40009 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T04:59:05,157 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK], DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]) is bad. 2024-12-03T04:59:05,157 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741866_1049 2024-12-03T04:59:05,157 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:57998 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data8]'}, localName='127.0.0.1:35215', datanodeUuid='0b88c979-8655-42f1-8356-0f1c738e6cf7', xmitsInProgress=0}:Exception transferring block BP-851166150-172.17.0.3-1733201923677:blk_1073741866_1049 to mirror 127.0.0.1:40009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:05,157 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK] 2024-12-03T04:59:05,157 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:57998 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T04:59:05,158 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:57998 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:35215:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57998 dst: /127.0.0.1:35215 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:05,160 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38707 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:05,160 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK], DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK]) is bad. 2024-12-03T04:59:05,160 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741867_1050 2024-12-03T04:59:05,160 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:58000 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741867_1050] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data8]'}, localName='127.0.0.1:35215', datanodeUuid='0b88c979-8655-42f1-8356-0f1c738e6cf7', xmitsInProgress=0}:Exception transferring block BP-851166150-172.17.0.3-1733201923677:blk_1073741867_1050 to mirror 127.0.0.1:38707 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:05,160 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:58000 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741867_1050] {}] datanode.BlockReceiver(316): Block 1073741867 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T04:59:05,161 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK] 2024-12-03T04:59:05,160 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:58000 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741867_1050] {}] datanode.DataXceiver(331): 127.0.0.1:35215:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58000 dst: /127.0.0.1:35215 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
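The DataStreamer warnings above follow a recognizable client-side recovery pattern: the write pipeline fails with "Connection refused", the block is abandoned, the offending datanode is excluded, and a new pipeline is attempted with the remaining nodes until none are left. A minimal, self-contained sketch of that exclude-and-retry loop is shown below; it is not the DataStreamer implementation, and the node addresses and connect timeout are illustrative assumptions, not values taken from the test.

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.util.ArrayList;
import java.util.List;

// Sketch of the exclude-and-retry behaviour visible in the DataStreamer warnings:
// try a candidate node, drop any node that refuses the connection, and give up
// once no candidates remain. Addresses and timeout are assumptions for the example.
public class PipelineRecoverySketch {
    public static void main(String[] args) {
        List<InetSocketAddress> candidates = new ArrayList<>(List.of(
            new InetSocketAddress("127.0.0.1", 35215),
            new InetSocketAddress("127.0.0.1", 40009),
            new InetSocketAddress("127.0.0.1", 38707)));
        List<InetSocketAddress> excluded = new ArrayList<>();
        while (!candidates.isEmpty()) {
            InetSocketAddress target = candidates.get(0);
            try (Socket s = new Socket()) {
                s.connect(target, 3000);          // analogous to the connect that fails above
                System.out.println("pipeline node ok: " + target);
                return;                           // pipeline established, stop retrying
            } catch (IOException e) {
                // analogous to "Abandoning blk_..." followed by "Excluding datanode ..."
                System.out.println("excluding bad datanode " + target + ": " + e.getMessage());
                excluded.add(target);
                candidates.remove(0);
            }
        }
        System.out.println("all candidate datanodes are bad, aborting (excluded=" + excluded + ")");
    }
}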
2024-12-03T04:59:05,161 WARN [IPC Server handler 1 on default port 38443 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T04:59:05,161 WARN [IPC Server handler 1 on default port 38443 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T04:59:05,161 WARN [IPC Server handler 1 on default port 38443 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T04:59:05,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741868_1051 (size=6027) 2024-12-03T04:59:05,167 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/.tmp/info/69085cc5be794f8280c4717e2da66513 2024-12-03T04:59:05,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/.tmp/info/69085cc5be794f8280c4717e2da66513 as hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/69085cc5be794f8280c4717e2da66513 2024-12-03T04:59:05,182 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/69085cc5be794f8280c4717e2da66513, entries=1, sequenceid=34, filesize=5.9 K 2024-12-03T04:59:05,184 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=0 B/0 for e4140b7a4d517ddb89f7b0d124e5ba92 in 40ms, sequenceid=34, compaction requested=true 2024-12-03T04:59:05,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e4140b7a4d517ddb89f7b0d124e5ba92: 2024-12-03T04:59:05,184 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-12-03T04:59:05,184 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): 
regionsWithCommonTable=1 2024-12-03T04:59:05,184 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/5e30a664d55a480ca6cd3c26f907ad1b because midkey is the same as first or last row 2024-12-03T04:59:05,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e4140b7a4d517ddb89f7b0d124e5ba92:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T04:59:05,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T04:59:05,185 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T04:59:05,186 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T04:59:05,186 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.HStore(1541): e4140b7a4d517ddb89f7b0d124e5ba92/info is initiating minor compaction (all files) 2024-12-03T04:59:05,186 INFO [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e4140b7a4d517ddb89f7b0d124e5ba92/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92. 2024-12-03T04:59:05,187 INFO [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/0543a8c3813c40d68f1c5c77f4726226, hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/5e30a664d55a480ca6cd3c26f907ad1b, hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/69085cc5be794f8280c4717e2da66513] into tmpdir=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/.tmp, totalSize=28.2 K 2024-12-03T04:59:05,187 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0543a8c3813c40d68f1c5c77f4726226, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733201939026 2024-12-03T04:59:05,188 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5e30a664d55a480ca6cd3c26f907ad1b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1733201943091 2024-12-03T04:59:05,188 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] compactions.Compactor(225): Compacting 69085cc5be794f8280c4717e2da66513, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733201945142 2024-12-03T04:59:05,213 INFO [RS:0;32beccd76b35:44203-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): e4140b7a4d517ddb89f7b0d124e5ba92#info#compaction#21 average throughput is 4.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T04:59:05,214 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/.tmp/info/5c68345fc4204159ab4e415ea61c5cfd is 1080, key is row0002/info:/1733201939026/Put/seqid=0 2024-12-03T04:59:05,216 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:05,216 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK], DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]) is bad. 2024-12-03T04:59:05,216 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741869_1052 2024-12-03T04:59:05,217 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK] 2024-12-03T04:59:05,218 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T04:59:05,219 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK], DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK]) is bad. 2024-12-03T04:59:05,219 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741870_1053 2024-12-03T04:59:05,219 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK] 2024-12-03T04:59:05,225 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33573 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:05,225 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:58038 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data8]'}, localName='127.0.0.1:35215', datanodeUuid='0b88c979-8655-42f1-8356-0f1c738e6cf7', xmitsInProgress=0}:Exception transferring block BP-851166150-172.17.0.3-1733201923677:blk_1073741871_1054 to mirror 127.0.0.1:33573 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
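The "Failed to place enough replicas, still in need of 1 to reach 2" warnings from BlockPlacementPolicyDefault record the same shortage from the NameNode side: the HOT storage policy wants two DISK storages, but only one live datanode remains to host them. The following is a simplified, self-contained sketch of that shortfall check; the node names, liveness flags, and replication factor mirror the situation in the log but are supplied here as assumptions for the example, not read from the cluster.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

// Simplified sketch of the replica-placement shortfall reported above: pick up to
// `replication` live DISK storages and report how many replicas are still missing.
public class PlacementShortfallSketch {
    static List<String> choose(Map<String, Boolean> diskStorageIsLive, int replication) {
        List<String> chosen = new ArrayList<>();
        for (Map.Entry<String, Boolean> e : diskStorageIsLive.entrySet()) {
            if (chosen.size() == replication) break;
            if (e.getValue()) chosen.add(e.getKey());   // only live DISK storages qualify
        }
        int missing = replication - chosen.size();
        if (missing > 0) {
            System.out.println("Failed to place enough replicas, still in need of "
                + missing + " to reach " + replication);
        }
        return chosen;
    }

    public static void main(String[] args) {
        // One live datanode left, replication factor 2, as in the log above.
        choose(Map.of("127.0.0.1:35215", true, "127.0.0.1:40009", false,
                      "127.0.0.1:38707", false, "127.0.0.1:45599", false), 2);
    }
}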
2024-12-03T04:59:05,225 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK], DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]) is bad. 2024-12-03T04:59:05,225 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741871_1054 2024-12-03T04:59:05,226 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:58038 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T04:59:05,226 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:58038 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:35215:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58038 dst: /127.0.0.1:35215 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:05,226 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK] 2024-12-03T04:59:05,230 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:58046 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741872_1055] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data8]'}, localName='127.0.0.1:35215', datanodeUuid='0b88c979-8655-42f1-8356-0f1c738e6cf7', xmitsInProgress=0}:Exception transferring block BP-851166150-172.17.0.3-1733201923677:blk_1073741872_1055 to mirror 127.0.0.1:45599 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:05,230 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:58046 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741872_1055] {}] datanode.BlockReceiver(316): Block 1073741872 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T04:59:05,230 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:58046 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741872_1055] {}] datanode.DataXceiver(331): 127.0.0.1:35215:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58046 dst: /127.0.0.1:35215 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:05,230 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45599 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T04:59:05,231 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK], DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]) is bad. 2024-12-03T04:59:05,231 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741872_1055 2024-12-03T04:59:05,232 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK] 2024-12-03T04:59:05,232 WARN [IPC Server handler 1 on default port 38443 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T04:59:05,233 WARN [IPC Server handler 1 on default port 38443 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T04:59:05,233 WARN [IPC Server handler 1 on default port 38443 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T04:59:05,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741873_1056 (size=17994) 2024-12-03T04:59:05,252 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/.tmp/info/5c68345fc4204159ab4e415ea61c5cfd as hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/5c68345fc4204159ab4e415ea61c5cfd 2024-12-03T04:59:05,264 INFO [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e4140b7a4d517ddb89f7b0d124e5ba92/info of e4140b7a4d517ddb89f7b0d124e5ba92 into 5c68345fc4204159ab4e415ea61c5cfd(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
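The split-policy DEBUG lines around the flush and compaction ("Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K", then "cannot split ... because midkey is the same as first or last row") reflect a two-stage decision: first a size-threshold check, then a sanity check that the proposed midkey actually divides the store file. Below is a self-contained sketch of that decision; the sizes are taken from the log, while the row-key strings are illustrative assumptions and the real policies work on byte[] keys and store-file metadata.

// Sketch of the two-stage split decision seen in the DEBUG lines: a size check,
// then a check that the proposed midkey differs from the first and last row.
public class SplitDecisionSketch {
    static boolean shouldSplit(long sumSizeBytes, long sizeToCheckBytes,
                               String firstRow, String midKey, String lastRow) {
        if (sumSizeBytes <= sizeToCheckBytes) {
            return false;                               // region not big enough yet
        }
        // "Should split because region size is big enough"
        if (midKey.equals(firstRow) || midKey.equals(lastRow)) {
            // "cannot split ... because midkey is the same as first or last row"
            return false;
        }
        return true;
    }

    public static void main(String[] args) {
        long sumSize = 28 * 1024 + 200;    // ~28.2 K, as in the log
        long sizeToCheck = 16 * 1024;      // 16.0 K, as in the log
        // Hypothetical keys: the midkey collides with the first row, so no split.
        System.out.println(shouldSplit(sumSize, sizeToCheck, "row0002", "row0002", "row0010"));
    }
}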
2024-12-03T04:59:05,264 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e4140b7a4d517ddb89f7b0d124e5ba92: 2024-12-03T04:59:05,266 INFO [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92., storeName=e4140b7a4d517ddb89f7b0d124e5ba92/info, priority=13, startTime=1733201945184; duration=0sec 2024-12-03T04:59:05,266 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-03T04:59:05,266 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T04:59:05,266 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/5c68345fc4204159ab4e415ea61c5cfd because midkey is the same as first or last row 2024-12-03T04:59:05,266 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-03T04:59:05,266 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T04:59:05,266 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/5c68345fc4204159ab4e415ea61c5cfd because midkey is the same as first or last row 2024-12-03T04:59:05,266 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-03T04:59:05,266 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T04:59:05,266 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/5c68345fc4204159ab4e415ea61c5cfd because midkey is the same as first or last row 2024-12-03T04:59:05,266 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T04:59:05,266 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e4140b7a4d517ddb89f7b0d124e5ba92:info 2024-12-03T04:59:05,510 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201925416 is not closed yet, will try archiving it next time 2024-12-03T04:59:05,510 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201943037 to hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/oldWALs/32beccd76b35%2C44203%2C1733201924824.1733201943037 2024-12-03T04:59:05,686 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5cd9f002[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35215, datanodeUuid=0b88c979-8655-42f1-8356-0f1c738e6cf7, infoPort=40501, infoSecurePort=0, ipcPort=45499, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677):Failed to transfer BP-851166150-172.17.0.3-1733201923677:blk_1073741853_1036 to 127.0.0.1:33573 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:05,686 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@689cf2a0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35215, datanodeUuid=0b88c979-8655-42f1-8356-0f1c738e6cf7, infoPort=40501, infoSecurePort=0, ipcPort=45499, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677):Failed to transfer BP-851166150-172.17.0.3-1733201923677:blk_1073741843_1026 to 127.0.0.1:38707 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:05,980 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:06,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44203 {}] regionserver.HRegion(8855): Flush requested on e4140b7a4d517ddb89f7b0d124e5ba92 2024-12-03T04:59:06,582 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e4140b7a4d517ddb89f7b0d124e5ba92 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-03T04:59:06,599 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/.tmp/info/7f798c0257fd454c932edd8fb4a888c0 is 1079, key is tmprow/info:/1733201946574/Put/seqid=0 2024-12-03T04:59:06,608 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:06,609 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK], DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]) is bad. 2024-12-03T04:59:06,611 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741874_1057 2024-12-03T04:59:06,614 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK] 2024-12-03T04:59:06,616 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:06,616 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK], DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK]) is bad. 2024-12-03T04:59:06,616 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741875_1058 2024-12-03T04:59:06,620 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38707,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK] 2024-12-03T04:59:06,654 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:58066 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741876_1059] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data8]'}, localName='127.0.0.1:35215', datanodeUuid='0b88c979-8655-42f1-8356-0f1c738e6cf7', xmitsInProgress=0}:Exception transferring block BP-851166150-172.17.0.3-1733201923677:blk_1073741876_1059 to mirror 127.0.0.1:45599 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:06,655 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:58066 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741876_1059] {}] datanode.BlockReceiver(316): Block 1073741876 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
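The cells being flushed in this test (row keys such as row0002 and tmprow, column family info) originate from ordinary client Puts against the TestLogRolling table. For orientation, a minimal sketch of such a write with the public HBase client API is shown below; it assumes a reachable cluster through the default configuration, and the qualifier and value are illustrative, not the exact payload the test writes.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of the kind of client write that produces the memstore cells flushed above.
// Assumes a reachable cluster via the default configuration; values are illustrative.
public class PutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(
                 TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))) {
            Put put = new Put(Bytes.toBytes("tmprow"));
            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value"));
            table.put(put);   // lands in the memstore and is later flushed to an HFile
        }
    }
}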
2024-12-03T04:59:06,655 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:58066 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741876_1059] {}] datanode.DataXceiver(331): 127.0.0.1:35215:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58066 dst: /127.0.0.1:35215 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:06,655 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45599 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:06,656 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK], DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]) is bad. 2024-12-03T04:59:06,656 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741876_1059 2024-12-03T04:59:06,657 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK] 2024-12-03T04:59:06,660 WARN [Thread-960 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:06,660 WARN [Thread-960 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK], DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]) is bad. 2024-12-03T04:59:06,660 WARN [Thread-960 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741877_1060 2024-12-03T04:59:06,660 WARN [Thread-960 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK] 2024-12-03T04:59:06,661 WARN [IPC Server handler 4 on default port 38443 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-03T04:59:06,661 WARN [IPC Server handler 4 on default port 38443 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-03T04:59:06,661 WARN [IPC Server handler 4 on default port 38443 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-03T04:59:06,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741878_1061 (size=6027) 2024-12-03T04:59:06,686 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@689cf2a0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35215, datanodeUuid=0b88c979-8655-42f1-8356-0f1c738e6cf7, infoPort=40501, infoSecurePort=0, ipcPort=45499, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677):Failed to transfer BP-851166150-172.17.0.3-1733201923677:blk_1073741858_1041 to 127.0.0.1:45599 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:06,941 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:07,072 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/.tmp/info/7f798c0257fd454c932edd8fb4a888c0 2024-12-03T04:59:07,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/.tmp/info/7f798c0257fd454c932edd8fb4a888c0 as hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/7f798c0257fd454c932edd8fb4a888c0 2024-12-03T04:59:07,092 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/7f798c0257fd454c932edd8fb4a888c0, entries=1, sequenceid=45, filesize=5.9 K 2024-12-03T04:59:07,094 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for e4140b7a4d517ddb89f7b0d124e5ba92 in 512ms, sequenceid=45, compaction requested=false 2024-12-03T04:59:07,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e4140b7a4d517ddb89f7b0d124e5ba92: 2024-12-03T04:59:07,094 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-12-03T04:59:07,094 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T04:59:07,095 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/5c68345fc4204159ab4e415ea61c5cfd because midkey is the same as first or last row 2024-12-03T04:59:07,107 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:07,108 WARN [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-12-03T04:59:07,230 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T04:59:07,238 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T04:59:07,245 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T04:59:07,245 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T04:59:07,245 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T04:59:07,249 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@14a36e87{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/hadoop.log.dir/,AVAILABLE} 2024-12-03T04:59:07,250 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@31d8af0c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T04:59:07,421 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7e3dea39{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/java.io.tmpdir/jetty-localhost-45555-hadoop-hdfs-3_4_1-tests_jar-_-any-9195747879206215736/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:59:07,422 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@73c11417{HTTP/1.1, (http/1.1)}{localhost:45555} 2024-12-03T04:59:07,422 INFO [Time-limited test {}] server.Server(415): Started @131584ms 
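The flush sequences above write each new HFile under the region's .tmp directory first and only then "commit" it by renaming it into the info family directory, so a half-written file never becomes visible to readers. A minimal sketch of that write-then-rename pattern with the Hadoop FileSystem API follows; the paths and payload are hypothetical placeholders, not the test's real region directories.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the write-to-.tmp-then-rename commit pattern visible in the
// "Committing ... .tmp/info/... as ... info/..." lines above. Paths are hypothetical.
public class TmpThenCommitSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/example/region/.tmp/info/hfile-0001");
        Path committed = new Path("/example/region/info/hfile-0001");
        try (FSDataOutputStream out = fs.create(tmp)) {
            out.writeBytes("flushed cells would go here");   // stand-in for HFile contents
        }
        fs.mkdirs(committed.getParent());
        // The rename is the commit step: readers only ever see the completed file.
        if (!fs.rename(tmp, committed)) {
            throw new java.io.IOException("commit failed for " + tmp);
        }
    }
}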
2024-12-03T04:59:07,424 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T04:59:07,588 WARN [Thread-979 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T04:59:07,602 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2a9b946e1b8c8686 with lease ID 0x9c1667867a4b148c: from storage DS-f1b292ff-4c29-4e13-be8c-722756eca30c node DatanodeRegistration(127.0.0.1:34499, datanodeUuid=b8f66bf4-78e5-4a33-b801-cf0b5bb96189, infoPort=37523, infoSecurePort=0, ipcPort=39311, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-03T04:59:07,602 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2a9b946e1b8c8686 with lease ID 0x9c1667867a4b148c: from storage DS-e2141e60-29f6-4d6d-9646-76d81ab1d44d node DatanodeRegistration(127.0.0.1:34499, datanodeUuid=b8f66bf4-78e5-4a33-b801-cf0b5bb96189, infoPort=37523, infoSecurePort=0, ipcPort=39311, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T04:59:07,981 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:08,686 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5cd9f002[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35215, datanodeUuid=0b88c979-8655-42f1-8356-0f1c738e6cf7, infoPort=40501, infoSecurePort=0, ipcPort=45499, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677):Failed to transfer BP-851166150-172.17.0.3-1733201923677:blk_1073741868_1051 to 127.0.0.1:40009 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
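The repeated "All datanodes [...] are bad. Aborting..." messages from the region server log roller and the master WAL roller, together with the earlier "Too many consecutive RollWriter requests" warning, show the rolling loop giving up on the current writer whenever its pipeline can no longer be repaired and asking for a fresh log file. Below is a self-contained sketch of that roll-on-failure loop; it is not the FSHLog implementation, and the stub writer and retry threshold are assumptions for the example.

import java.io.IOException;

// Sketch of a roll-on-failure loop: every sync failure triggers a request for a
// new WAL file, and after too many consecutive requests the roller warns that
// there are likely fewer live datanodes than the tolerable replicas, as above.
public class RollOnFailureSketch {
    interface WalWriter { void sync() throws IOException; }

    public static void main(String[] args) {
        int consecutiveRollRequests = 0;
        for (int attempt = 0; attempt < 5; attempt++) {
            WalWriter writer = newWriter(attempt);
            try {
                writer.sync();
                consecutiveRollRequests = 0;        // healthy pipeline, keep the writer
            } catch (IOException e) {
                consecutiveRollRequests++;           // ask for a fresh log file
                System.out.println("roll requested after: " + e.getMessage());
                if (consecutiveRollRequests >= 3) {
                    System.out.println("Too many consecutive RollWriter requests, "
                        + "likely fewer live datanodes than the tolerable replicas.");
                }
            }
        }
    }

    // Stand-in for opening a new WAL writer; here every pipeline is "bad".
    static WalWriter newWriter(int attempt) {
        return () -> {
            throw new IOException("All datanodes are bad. Aborting... (attempt " + attempt + ")");
        };
    }
}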
2024-12-03T04:59:08,686 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@689cf2a0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35215, datanodeUuid=0b88c979-8655-42f1-8356-0f1c738e6cf7, infoPort=40501, infoSecurePort=0, ipcPort=45499, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677):Failed to transfer BP-851166150-172.17.0.3-1733201923677:blk_1073741848_1031 to 127.0.0.1:40009 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:08,942 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:09,108 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:09,685 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@689cf2a0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35215, datanodeUuid=0b88c979-8655-42f1-8356-0f1c738e6cf7, infoPort=40501, infoSecurePort=0, ipcPort=45499, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677):Failed to transfer BP-851166150-172.17.0.3-1733201923677:blk_1073741873_1056 to 127.0.0.1:33573 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:09,685 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5cd9f002[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35215, datanodeUuid=0b88c979-8655-42f1-8356-0f1c738e6cf7, infoPort=40501, infoSecurePort=0, ipcPort=45499, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677):Failed to transfer BP-851166150-172.17.0.3-1733201923677:blk_1073741878_1061 to 127.0.0.1:40009 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:09,981 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:10,942 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:11,108 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:11,982 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:12,943 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:13,109 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T04:59:13,982 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:14,656 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T04:59:14,943 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:15,076 ERROR [FSHLog-0-hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData-prefix:32beccd76b35,35531,1733201924718 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:15,076 WARN [FSHLog-0-hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData-prefix:32beccd76b35,35531,1733201924718 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:15,076 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 32beccd76b35%2C35531%2C1733201924718:(num 1733201924973) roll requested 2024-12-03T04:59:15,076 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C35531%2C1733201924718.1733201955076 2024-12-03T04:59:15,080 WARN [Thread-999 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:15,080 WARN [Thread-999 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK], DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]) is bad. 2024-12-03T04:59:15,080 WARN [Thread-999 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741879_1062 2024-12-03T04:59:15,081 WARN [Thread-999 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK] 2024-12-03T04:59:15,083 WARN [Thread-999 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33573 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
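Note: the Thread-999 lines above trace the DFS output stream's create-pipeline loop during the master WAL roll: a connect or ack failure marks one node bad, the block is abandoned, that node is excluded, and allocation is retried with the remaining datanodes. The sketch below is not the DataStreamer code; it is a simplified standalone model of that exclude-and-retry loop, and every name in it is hypothetical.

    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    /**
     * Simplified model of the retry loop visible in the Thread-999 lines above:
     * each failed pipeline attempt abandons the block, excludes the node reported
     * bad, and retries until a pipeline comes up or no candidates remain.
     * Illustrative sketch only, not the real DataStreamer implementation.
     */
    public class PipelineSetupSketch {

      interface PipelineAttempt {
        /** Returns the bad node's name, or null if the pipeline came up cleanly. */
        String tryBuild(List<String> nodes);
      }

      static List<String> setupPipeline(List<String> candidates, int pipelineWidth,
                                        int maxAttempts, PipelineAttempt attempt) {
        Set<String> excluded = new HashSet<>();
        for (int i = 0; i < maxAttempts; i++) {
          List<String> nodes = new ArrayList<>();
          for (String dn : candidates) {
            if (!excluded.contains(dn) && nodes.size() < pipelineWidth) {
              nodes.add(dn);
            }
          }
          if (nodes.isEmpty()) {
            break; // nothing left to try: the "all datanodes are bad" situation
          }
          String badNode = attempt.tryBuild(nodes);
          if (badNode == null) {
            return nodes; // pipeline established
          }
          // Mirrors "Abandoning blk_..." followed by "Excluding datanode ..." in the log.
          excluded.add(badNode);
        }
        throw new IllegalStateException("could not build a pipeline, all candidates excluded");
      }

      public static void main(String[] args) {
        List<String> candidates = List.of("dn-45599", "dn-33573", "dn-40009", "dn-35215", "dn-34499");
        Set<String> dead = Set.of("dn-45599", "dn-33573", "dn-40009"); // hypothetical dead nodes
        List<String> pipeline = setupPipeline(candidates, 2, 5,
            nodes -> nodes.stream().filter(dead::contains).findFirst().orElse(null));
        System.out.println("pipeline: " + pipeline);
      }
    }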
2024-12-03T04:59:15,083 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1028590899_22 at /127.0.0.1:41778 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741880_1063] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data8]'}, localName='127.0.0.1:35215', datanodeUuid='0b88c979-8655-42f1-8356-0f1c738e6cf7', xmitsInProgress=0}:Exception transferring block BP-851166150-172.17.0.3-1733201923677:blk_1073741880_1063 to mirror 127.0.0.1:33573 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:15,084 WARN [Thread-999 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK], DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]) is bad. 2024-12-03T04:59:15,084 WARN [Thread-999 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741880_1063 2024-12-03T04:59:15,084 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1028590899_22 at /127.0.0.1:41778 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741880_1063] {}] datanode.BlockReceiver(316): Block 1073741880 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-03T04:59:15,084 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1028590899_22 at /127.0.0.1:41778 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741880_1063] {}] datanode.DataXceiver(331): 127.0.0.1:35215:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41778 dst: /127.0.0.1:35215 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:15,084 WARN [Thread-999 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK] 2024-12-03T04:59:15,085 WARN [Thread-999 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:15,085 WARN [Thread-999 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK], DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]) is bad. 2024-12-03T04:59:15,085 WARN [Thread-999 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741881_1064 2024-12-03T04:59:15,086 WARN [Thread-999 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK] 2024-12-03T04:59:15,094 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:15,094 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:15,094 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:15,094 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:15,094 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:15,095 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/WALs/32beccd76b35,35531,1733201924718/32beccd76b35%2C35531%2C1733201924718.1733201924973 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/WALs/32beccd76b35,35531,1733201924718/32beccd76b35%2C35531%2C1733201924718.1733201955076 2024-12-03T04:59:15,095 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:15,095 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:15,095 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/WALs/32beccd76b35,35531,1733201924718/32beccd76b35%2C35531%2C1733201924718.1733201924973 2024-12-03T04:59:15,096 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37523:37523),(127.0.0.1/127.0.0.1:40501:40501)] 2024-12-03T04:59:15,096 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/WALs/32beccd76b35,35531,1733201924718/32beccd76b35%2C35531%2C1733201924718.1733201924973 is not closed yet, will try archiving it next time 2024-12-03T04:59:15,096 WARN [IPC Server handler 1 on default port 38443 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/WALs/32beccd76b35,35531,1733201924718/32beccd76b35%2C35531%2C1733201924718.1733201924973 has not been closed. Lease recovery is in progress. RecoveryId = 1066 for block blk_1073741830_1006 2024-12-03T04:59:15,096 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/WALs/32beccd76b35,35531,1733201924718/32beccd76b35%2C35531%2C1733201924718.1733201924973 after 1ms 2024-12-03T04:59:15,109 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:15,982 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:17,109 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:17,628 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7a838b51 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-851166150-172.17.0.3-1733201923677:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:45599,null,null]) java.net.ConnectException: Call From 32beccd76b35/172.17.0.3 to localhost:44181 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-03T04:59:17,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34499 is added to blk_1073741833_1020 (size=455) 2024-12-03T04:59:17,983 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T04:59:18,040 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201925416 to hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/oldWALs/32beccd76b35%2C44203%2C1733201924824.1733201925416 2024-12-03T04:59:19,098 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/WALs/32beccd76b35,35531,1733201924718/32beccd76b35%2C35531%2C1733201924718.1733201924973 after 4003ms 2024-12-03T04:59:19,110 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:19,983 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:20,596 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4bb31557[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34499, datanodeUuid=b8f66bf4-78e5-4a33-b801-cf0b5bb96189, infoPort=37523, infoSecurePort=0, ipcPort=39311, storageInfo=lv=-57;cid=testClusterID;nsid=951368906;c=1733201923677):Failed to transfer BP-851166150-172.17.0.3-1733201923677:blk_1073741833_1020 to 127.0.0.1:40009 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:21,110 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:21,984 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:22,963 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C44203%2C1733201924824.1733201962963 2024-12-03T04:59:22,966 WARN [Thread-1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:22,967 WARN [Thread-1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741883_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK], DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]) is bad. 
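Note: the RecoverLeaseFSUtils lines above ("Failed to recover lease, attempt=0 ... after 1ms", then "attempt=1 ... after 4003ms") show the WAL close path asking the NameNode to recover the lease on the previous WAL file and polling until recovery completes; the NameNode reports "Lease recovery is in progress", so early attempts return false and the caller backs off and retries. The sketch below uses the real DistributedFileSystem.recoverLease(Path) call, but the retry cadence, path, and class name are assumptions for illustration, not HBase's RecoverLeaseFSUtils.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    /**
     * Hedged sketch of the lease-recovery polling visible above. recoverLease() is a
     * real HDFS client call; the backoff, attempt limit, and paths are illustrative only.
     */
    public class LeaseRecoverySketch {
      public static boolean recoverLeaseWithRetries(DistributedFileSystem dfs, Path walFile,
                                                    int maxAttempts, long backoffMs)
          throws Exception {
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
          // true: the file is closed and the lease released; false: recovery was
          // started (or is still in progress) and we should poll again.
          if (dfs.recoverLease(walFile)) {
            return true;
          }
          System.out.println("Failed to recover lease, attempt=" + attempt + ", retrying");
          Thread.sleep(backoffMs);
        }
        return false;
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:38443"); // hypothetical NameNode address
        try (FileSystem fs = FileSystem.get(conf)) {
          Path oldWal = new Path("/user/jenkins/example-old-wal"); // hypothetical path
          boolean recovered = recoverLeaseWithRetries((DistributedFileSystem) fs, oldWal, 5, 4000L);
          System.out.println("lease recovered: " + recovered);
        }
      }
    }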
2024-12-03T04:59:22,967 WARN [Thread-1010 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741883_1067 2024-12-03T04:59:22,967 WARN [Thread-1010 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK] 2024-12-03T04:59:22,987 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:22,987 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:22,987 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:22,987 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:22,988 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:22,988 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201945087 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201962963 2024-12-03T04:59:22,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741863_1046 (size=13591) 2024-12-03T04:59:22,990 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37523:37523),(127.0.0.1/127.0.0.1:40501:40501)] 2024-12-03T04:59:22,990 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.1733201945087 is not closed yet, will try archiving it next time 2024-12-03T04:59:23,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44203 {}] regionserver.HRegion(8855): Flush requested on e4140b7a4d517ddb89f7b0d124e5ba92 2024-12-03T04:59:23,005 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e4140b7a4d517ddb89f7b0d124e5ba92 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-03T04:59:23,011 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/.tmp/info/fe4df22f736a47f4b22d01e8005bf975 is 1080, key is row0013/info:/1733201962992/Put/seqid=0 2024-12-03T04:59:23,013 WARN [Thread-1016 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:23,013 WARN [Thread-1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741885_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK], DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]) is bad. 2024-12-03T04:59:23,014 WARN [Thread-1016 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741885_1069 2024-12-03T04:59:23,018 WARN [Thread-1016 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK] 2024-12-03T04:59:23,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741886_1070 (size=11421) 2024-12-03T04:59:23,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34499 is added to blk_1073741886_1070 (size=11421) 2024-12-03T04:59:23,040 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/.tmp/info/fe4df22f736a47f4b22d01e8005bf975 2024-12-03T04:59:23,049 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/.tmp/info/fe4df22f736a47f4b22d01e8005bf975 as hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/fe4df22f736a47f4b22d01e8005bf975 2024-12-03T04:59:23,055 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/fe4df22f736a47f4b22d01e8005bf975, entries=6, sequenceid=55, filesize=11.2 K 2024-12-03T04:59:23,057 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for e4140b7a4d517ddb89f7b0d124e5ba92 in 52ms, sequenceid=55, compaction requested=true 2024-12-03T04:59:23,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e4140b7a4d517ddb89f7b0d124e5ba92: 2024-12-03T04:59:23,057 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-12-03T04:59:23,057 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T04:59:23,057 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/5c68345fc4204159ab4e415ea61c5cfd because midkey is the same as first or last row 
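Note: the MemStoreFlusher lines following the flush show the split check: the store totals 34.6 K against a (test-lowered) 16.0 K threshold, so a split looks warranted by size, but StoreUtils refuses because the candidate split point (the HFile midkey) equals the first or last row, which would leave one daughter region empty. The plain-Java sketch below models only that decision; it is not the HBase split-policy code, and its names and sample rows are hypothetical.

    import java.util.Optional;

    /**
     * Plain-Java model of the split check in the MemStoreFlusher lines above:
     * a split is desirable once the store exceeds the size threshold, but it is
     * skipped when the midkey equals the first or last row. Names are hypothetical.
     */
    public class SplitCheckSketch {

      static boolean sizeWantsSplit(long sumStoreSizeBytes, long sizeToCheckBytes) {
        // "Should split because region size is big enough sumSize=..., sizeToCheck=..."
        return sumStoreSizeBytes > sizeToCheckBytes;
      }

      static Optional<String> chooseSplitRow(String firstRow, String midKey, String lastRow) {
        // "cannot split ... because midkey is the same as first or last row"
        if (midKey.equals(firstRow) || midKey.equals(lastRow)) {
          return Optional.empty();
        }
        return Optional.of(midKey);
      }

      public static void main(String[] args) {
        long sumSize = 35_442L;     // ~34.6 K, as reported for the three store files
        long sizeToCheck = 16_384L; // 16.0 K threshold used in the log lines
        System.out.println("size wants split: " + sizeWantsSplit(sumSize, sizeToCheck));
        // In this illustration the midkey equals the first row, mirroring the log's refusal.
        System.out.println("split row: " + chooseSplitRow("row0002", "row0002", "row0013"));
      }
    }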
2024-12-03T04:59:23,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e4140b7a4d517ddb89f7b0d124e5ba92:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T04:59:23,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T04:59:23,057 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T04:59:23,059 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T04:59:23,059 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.HStore(1541): e4140b7a4d517ddb89f7b0d124e5ba92/info is initiating minor compaction (all files) 2024-12-03T04:59:23,059 INFO [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e4140b7a4d517ddb89f7b0d124e5ba92/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92. 2024-12-03T04:59:23,059 INFO [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/5c68345fc4204159ab4e415ea61c5cfd, hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/7f798c0257fd454c932edd8fb4a888c0, hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/fe4df22f736a47f4b22d01e8005bf975] into tmpdir=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/.tmp, totalSize=34.6 K 2024-12-03T04:59:23,060 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5c68345fc4204159ab4e415ea61c5cfd, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733201939026 2024-12-03T04:59:23,060 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7f798c0257fd454c932edd8fb4a888c0, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1733201946574 2024-12-03T04:59:23,061 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] compactions.Compactor(225): Compacting fe4df22f736a47f4b22d01e8005bf975, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733201946989 2024-12-03T04:59:23,079 INFO [RS:0;32beccd76b35:44203-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e4140b7a4d517ddb89f7b0d124e5ba92#info#compaction#24 average throughput is 5.81 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T04:59:23,079 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/.tmp/info/1c097dd1a91b423ebbe13b947fef9265 is 1080, key is row0002/info:/1733201939026/Put/seqid=0 2024-12-03T04:59:23,081 WARN [Thread-1025 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1071 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:23,081 WARN [Thread-1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741887_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK], DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]) is bad. 2024-12-03T04:59:23,082 WARN [Thread-1025 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741887_1071 2024-12-03T04:59:23,082 WARN [Thread-1025 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK] 2024-12-03T04:59:23,083 WARN [Thread-1025 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1072 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T04:59:23,083 WARN [Thread-1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741888_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK], DatanodeInfoWithStorage[127.0.0.1:34499,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]) is bad. 2024-12-03T04:59:23,083 WARN [Thread-1025 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741888_1072 2024-12-03T04:59:23,084 WARN [Thread-1025 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK] 2024-12-03T04:59:23,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741889_1073 (size=23502) 2024-12-03T04:59:23,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34499 is added to blk_1073741889_1073 (size=23502) 2024-12-03T04:59:23,101 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/.tmp/info/1c097dd1a91b423ebbe13b947fef9265 as hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/1c097dd1a91b423ebbe13b947fef9265 2024-12-03T04:59:23,108 INFO [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e4140b7a4d517ddb89f7b0d124e5ba92/info of e4140b7a4d517ddb89f7b0d124e5ba92 into 1c097dd1a91b423ebbe13b947fef9265(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
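Note: the shortCompactions lines above walk through minor-compaction selection: three eligible HFiles (17.6 K, 5.9 K, and 11.2 K, 35442 bytes total) are chosen and rewritten into a single 23.0 K file, with the throughput controller capping writes at 50.00 MB/second. The sketch below is a deliberately simplified ratio-based selection model in plain Java; it is not HBase's ExploringCompactionPolicy, and the 1.2 ratio, helper names, and approximate byte sizes are assumptions.

    import java.util.List;

    /**
     * Simplified model of ratio-based minor-compaction selection, illustrating the
     * "Exploring compaction algorithm has selected 3 files of size 35442" lines.
     * Not the real policy; the ratio and greedy all-or-nothing check are assumptions.
     */
    public class CompactionSelectionSketch {

      /** A file is selectable when it is no larger than ratio * (sum of the other candidates). */
      static boolean withinRatio(long fileSize, long sumOfOthers, double ratio) {
        return fileSize <= sumOfOthers * ratio;
      }

      static List<Long> selectAllIfRatioHolds(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          if (!withinRatio(size, total - size, ratio)) {
            return List.of(); // an outlier file would dominate the rewrite; skip it here
          }
        }
        return fileSizes; // files are close enough in size to compact together
      }

      public static void main(String[] args) {
        // Approximate byte sizes of the 17.6 K, 5.9 K, and 11.2 K store files from the log.
        List<Long> storeFiles = List.of(18_000L, 6_000L, 11_421L);
        List<Long> selected = selectAllIfRatioHolds(storeFiles, 1.2);
        long total = selected.stream().mapToLong(Long::longValue).sum();
        System.out.println("selected " + selected.size() + " files totalling " + total + " bytes");
      }
    }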
2024-12-03T04:59:23,108 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e4140b7a4d517ddb89f7b0d124e5ba92: 2024-12-03T04:59:23,108 INFO [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92., storeName=e4140b7a4d517ddb89f7b0d124e5ba92/info, priority=13, startTime=1733201963057; duration=0sec 2024-12-03T04:59:23,108 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-03T04:59:23,108 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T04:59:23,108 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/1c097dd1a91b423ebbe13b947fef9265 because midkey is the same as first or last row 2024-12-03T04:59:23,108 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-03T04:59:23,108 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T04:59:23,109 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/1c097dd1a91b423ebbe13b947fef9265 because midkey is the same as first or last row 2024-12-03T04:59:23,109 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-03T04:59:23,109 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T04:59:23,109 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/1c097dd1a91b423ebbe13b947fef9265 because midkey is the same as first or last row 2024-12-03T04:59:23,109 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T04:59:23,109 DEBUG [RS:0;32beccd76b35:44203-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e4140b7a4d517ddb89f7b0d124e5ba92:info 2024-12-03T04:59:23,110 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:23,110 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-12-03T04:59:23,229 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T04:59:23,229 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T04:59:23,230 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T04:59:23,230 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T04:59:23,230 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T04:59:23,230 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T04:59:23,230 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T04:59:23,230 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=341552348, stopped=false 2024-12-03T04:59:23,230 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=32beccd76b35,35531,1733201924718 2024-12-03T04:59:23,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44203-0x101517586e10001, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T04:59:23,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38103-0x101517586e10002, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T04:59:23,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44203-0x101517586e10001, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:59:23,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T04:59:23,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38103-0x101517586e10002, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:59:23,232 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T04:59:23,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:59:23,233 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-03T04:59:23,233 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T04:59:23,233 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T04:59:23,233 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '32beccd76b35,44203,1733201924824' ***** 2024-12-03T04:59:23,233 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T04:59:23,233 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '32beccd76b35,38103,1733201925919' ***** 2024-12-03T04:59:23,233 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T04:59:23,233 INFO [RS:0;32beccd76b35:44203 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T04:59:23,233 INFO [RS:1;32beccd76b35:38103 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T04:59:23,234 INFO [RS:0;32beccd76b35:44203 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T04:59:23,234 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T04:59:23,234 INFO [RS:1;32beccd76b35:38103 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T04:59:23,234 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T04:59:23,234 INFO [RS:0;32beccd76b35:44203 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T04:59:23,234 INFO [RS:1;32beccd76b35:38103 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T04:59:23,234 INFO [RS:1;32beccd76b35:38103 {}] regionserver.HRegionServer(959): stopping server 32beccd76b35,38103,1733201925919 2024-12-03T04:59:23,234 INFO [RS:0;32beccd76b35:44203 {}] regionserver.HRegionServer(3091): Received CLOSE for e4140b7a4d517ddb89f7b0d124e5ba92 2024-12-03T04:59:23,234 INFO [RS:1;32beccd76b35:38103 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T04:59:23,234 INFO [RS:1;32beccd76b35:38103 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;32beccd76b35:38103. 
2024-12-03T04:59:23,234 DEBUG [RS:1;32beccd76b35:38103 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T04:59:23,234 DEBUG [RS:1;32beccd76b35:38103 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T04:59:23,234 INFO [RS:1;32beccd76b35:38103 {}] regionserver.HRegionServer(976): stopping server 32beccd76b35,38103,1733201925919; all regions closed. 2024-12-03T04:59:23,234 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38103-0x101517586e10002, quorum=127.0.0.1:50617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T04:59:23,234 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T04:59:23,235 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44203-0x101517586e10001, quorum=127.0.0.1:50617, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T04:59:23,235 INFO [RS:0;32beccd76b35:44203 {}] regionserver.HRegionServer(959): stopping server 32beccd76b35,44203,1733201924824 2024-12-03T04:59:23,235 INFO [RS:0;32beccd76b35:44203 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T04:59:23,235 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:23,235 INFO [RS:0;32beccd76b35:44203 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;32beccd76b35:44203. 
2024-12-03T04:59:23,235 DEBUG [RS:0;32beccd76b35:44203 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T04:59:23,235 DEBUG [RS:0;32beccd76b35:44203 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T04:59:23,235 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:23,235 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e4140b7a4d517ddb89f7b0d124e5ba92, disabling compactions & flushes 2024-12-03T04:59:23,235 INFO [RS:0;32beccd76b35:44203 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T04:59:23,235 INFO [RS:0;32beccd76b35:44203 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T04:59:23,235 INFO [RS:0;32beccd76b35:44203 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T04:59:23,235 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92. 2024-12-03T04:59:23,235 INFO [RS:0;32beccd76b35:44203 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T04:59:23,235 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92. 2024-12-03T04:59:23,235 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92. after waiting 0 ms 2024-12-03T04:59:23,235 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92. 
2024-12-03T04:59:23,235 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing e4140b7a4d517ddb89f7b0d124e5ba92 1/1 column families, dataSize=6.30 KB heapSize=7 KB 2024-12-03T04:59:23,236 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:23,236 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:23,236 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:23,237 INFO [RS:0;32beccd76b35:44203 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-03T04:59:23,238 DEBUG [RS:0;32beccd76b35:44203 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, e4140b7a4d517ddb89f7b0d124e5ba92=TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92.} 2024-12-03T04:59:23,238 DEBUG [RS:0;32beccd76b35:44203 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, e4140b7a4d517ddb89f7b0d124e5ba92 2024-12-03T04:59:23,238 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T04:59:23,238 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T04:59:23,238 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T04:59:23,238 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T04:59:23,238 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T04:59:23,238 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-12-03T04:59:23,238 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:23,238 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:23,238 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 2024-12-03T04:59:23,238 ERROR [FSHLog-0-hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0-prefix:32beccd76b35,44203,1733201924824.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:23,238 WARN [FSHLog-0-hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0-prefix:32beccd76b35,44203,1733201924824.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:23,239 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 32beccd76b35%2C44203%2C1733201924824.meta:.meta(num 1733201925707) roll requested 2024-12-03T04:59:23,239 WARN [IPC Server handler 4 on default port 38443 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 has not been closed. Lease recovery is in progress. 
RecoveryId = 1074 for block blk_1073741837_1013 2024-12-03T04:59:23,239 INFO [regionserver/32beccd76b35:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C44203%2C1733201924824.meta.1733201963239.meta 2024-12-03T04:59:23,239 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 after 1ms 2024-12-03T04:59:23,242 WARN [Thread-1032 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1075 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:23,242 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/.tmp/info/f4d368cf0dda4d358716f7d2b322ddce is 1080, key is row0018/info:/1733201963006/Put/seqid=0 2024-12-03T04:59:23,242 WARN [Thread-1032 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741890_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK], DatanodeInfoWithStorage[127.0.0.1:34499,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]) is bad. 
2024-12-03T04:59:23,242 WARN [Thread-1032 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741890_1075 2024-12-03T04:59:23,242 WARN [Thread-1032 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK] 2024-12-03T04:59:23,250 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:55700 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741891_1076] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data8]'}, localName='127.0.0.1:35215', datanodeUuid='0b88c979-8655-42f1-8356-0f1c738e6cf7', xmitsInProgress=0}:Exception transferring block BP-851166150-172.17.0.3-1733201923677:blk_1073741891_1076 to mirror 127.0.0.1:40009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:23,250 WARN [Thread-1031 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1076 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40009 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:23,250 WARN [Thread-1031 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741891_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK], DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]) is bad. 
2024-12-03T04:59:23,250 WARN [Thread-1031 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741891_1076 2024-12-03T04:59:23,250 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:55700 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741891_1076] {}] datanode.BlockReceiver(316): Block 1073741891 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T04:59:23,251 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:55700 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741891_1076] {}] datanode.DataXceiver(331): 127.0.0.1:35215:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55700 dst: /127.0.0.1:35215 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:23,251 WARN [Thread-1031 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK] 2024-12-03T04:59:23,251 WARN [Thread-1032 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1077 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40009 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T04:59:23,251 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:55716 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741892_1077] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data8]'}, localName='127.0.0.1:35215', datanodeUuid='0b88c979-8655-42f1-8356-0f1c738e6cf7', xmitsInProgress=0}:Exception transferring block BP-851166150-172.17.0.3-1733201923677:blk_1073741892_1077 to mirror 127.0.0.1:40009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:23,251 WARN [Thread-1032 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741892_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK], DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]) is bad. 2024-12-03T04:59:23,251 WARN [Thread-1032 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741892_1077 2024-12-03T04:59:23,251 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:55716 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741892_1077] {}] datanode.BlockReceiver(316): Block 1073741892 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-03T04:59:23,251 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:55716 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741892_1077] {}] datanode.DataXceiver(331): 127.0.0.1:35215:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55716 dst: /127.0.0.1:35215 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:23,252 WARN [Thread-1032 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK] 2024-12-03T04:59:23,252 WARN [Thread-1031 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1078 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:23,253 WARN [Thread-1031 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741893_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK], DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]) is bad. 2024-12-03T04:59:23,253 WARN [Thread-1031 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741893_1078 2024-12-03T04:59:23,253 WARN [Thread-1031 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK] 2024-12-03T04:59:23,254 WARN [Thread-1031 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741895_1080 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:23,255 WARN [Thread-1031 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741895_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK], DatanodeInfoWithStorage[127.0.0.1:34499,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]) is bad. 2024-12-03T04:59:23,255 WARN [Thread-1031 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741895_1080 2024-12-03T04:59:23,254 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:48816 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741894_1079] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data4]'}, localName='127.0.0.1:34499', datanodeUuid='b8f66bf4-78e5-4a33-b801-cf0b5bb96189', xmitsInProgress=0}:Exception transferring block BP-851166150-172.17.0.3-1733201923677:blk_1073741894_1079 to mirror 127.0.0.1:33573 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:23,255 WARN [Thread-1032 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1079 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33573 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T04:59:23,255 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:48816 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741894_1079] {}] datanode.BlockReceiver(316): Block 1073741894 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-03T04:59:23,255 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:48816 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741894_1079] {}] datanode.DataXceiver(331): 127.0.0.1:34499:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48816 dst: /127.0.0.1:34499 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:23,255 WARN [Thread-1032 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741894_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34499,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK], DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]) is bad. 
2024-12-03T04:59:23,255 WARN [Thread-1031 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK] 2024-12-03T04:59:23,255 WARN [Thread-1032 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741894_1079 2024-12-03T04:59:23,256 WARN [Thread-1032 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK] 2024-12-03T04:59:23,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34499 is added to blk_1073741896_1081 (size=11421) 2024-12-03T04:59:23,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741896_1081 (size=11421) 2024-12-03T04:59:23,263 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/.tmp/info/f4d368cf0dda4d358716f7d2b322ddce 2024-12-03T04:59:23,263 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:23,263 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:23,264 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:23,264 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:23,264 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:23,264 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201963239.meta 2024-12-03T04:59:23,264 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:23,265 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:23,265 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta 2024-12-03T04:59:23,265 WARN [IPC Server handler 3 on default port 38443 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta has not been closed. Lease recovery is in progress. RecoveryId = 1083 for block blk_1073741834_1010 2024-12-03T04:59:23,265 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37523:37523),(127.0.0.1/127.0.0.1:40501:40501)] 2024-12-03T04:59:23,266 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta is not closed yet, will try archiving it next time 2024-12-03T04:59:23,266 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta after 1ms 2024-12-03T04:59:23,271 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/.tmp/info/f4d368cf0dda4d358716f7d2b322ddce as hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/f4d368cf0dda4d358716f7d2b322ddce 2024-12-03T04:59:23,277 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/f4d368cf0dda4d358716f7d2b322ddce, entries=6, sequenceid=65, filesize=11.2 K 2024-12-03T04:59:23,278 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for e4140b7a4d517ddb89f7b0d124e5ba92 in 43ms, sequenceid=65, compaction requested=false 2024-12-03T04:59:23,280 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92.-1 {}] regionserver.HStore(2317): Moving the files 
[hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/0543a8c3813c40d68f1c5c77f4726226, hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/5e30a664d55a480ca6cd3c26f907ad1b, hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/5c68345fc4204159ab4e415ea61c5cfd, hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/69085cc5be794f8280c4717e2da66513, hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/7f798c0257fd454c932edd8fb4a888c0, hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/fe4df22f736a47f4b22d01e8005bf975] to archive 2024-12-03T04:59:23,281 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-03T04:59:23,283 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/0543a8c3813c40d68f1c5c77f4726226 to hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/0543a8c3813c40d68f1c5c77f4726226 2024-12-03T04:59:23,285 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/5e30a664d55a480ca6cd3c26f907ad1b to hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/5e30a664d55a480ca6cd3c26f907ad1b 2024-12-03T04:59:23,286 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/5c68345fc4204159ab4e415ea61c5cfd to hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/5c68345fc4204159ab4e415ea61c5cfd 2024-12-03T04:59:23,288 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/69085cc5be794f8280c4717e2da66513 to hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/69085cc5be794f8280c4717e2da66513 2024-12-03T04:59:23,291 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/7f798c0257fd454c932edd8fb4a888c0 to hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/7f798c0257fd454c932edd8fb4a888c0 2024-12-03T04:59:23,293 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/hbase/meta/1588230740/.tmp/info/67144c4888cc45bd8149f607ef90a3eb is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92./info:regioninfo/1733201926403/Put/seqid=0 2024-12-03T04:59:23,294 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/fe4df22f736a47f4b22d01e8005bf975 to hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/info/fe4df22f736a47f4b22d01e8005bf975 2024-12-03T04:59:23,294 WARN [Thread-1046 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1084 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T04:59:23,295 WARN [Thread-1046 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741898_1084 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK], DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]) is bad. 2024-12-03T04:59:23,294 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=32beccd76b35:35531 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-12-03T04:59:23,295 WARN [Thread-1046 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741898_1084 2024-12-03T04:59:23,295 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [0543a8c3813c40d68f1c5c77f4726226=10347, 5e30a664d55a480ca6cd3c26f907ad1b=12506, 5c68345fc4204159ab4e415ea61c5cfd=17994, 69085cc5be794f8280c4717e2da66513=6027, 7f798c0257fd454c932edd8fb4a888c0=6027, fe4df22f736a47f4b22d01e8005bf975=11421] 2024-12-03T04:59:23,295 WARN [Thread-1046 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK] 2024-12-03T04:59:23,298 WARN [Thread-1046 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33573 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:23,298 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:55742 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741899_1085] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data8]'}, localName='127.0.0.1:35215', datanodeUuid='0b88c979-8655-42f1-8356-0f1c738e6cf7', xmitsInProgress=0}:Exception transferring block BP-851166150-172.17.0.3-1733201923677:blk_1073741899_1085 to mirror 127.0.0.1:33573 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:23,298 WARN [Thread-1046 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK], DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]) is bad. 2024-12-03T04:59:23,298 WARN [Thread-1046 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741899_1085 2024-12-03T04:59:23,298 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:55742 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741899_1085] {}] datanode.BlockReceiver(316): Block 1073741899 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T04:59:23,298 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:55742 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741899_1085] {}] datanode.DataXceiver(331): 127.0.0.1:35215:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55742 dst: /127.0.0.1:35215 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
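[editor's note] The repeated "Error Recovery for ... datanode N(...) is bad" / "Excluding datanode ..." entries show the DFS client rebuilding its write pipeline after the killed datanode stops accepting connections. Whether the client asks the NameNode for a replacement node is governed by the dfs.client.block.write.replace-datanode-on-failure.* settings; a hedged sketch of setting them on a client Configuration follows (the chosen values are illustrative, not what this test uses):

```java
import org.apache.hadoop.conf.Configuration;

// Sketch: client-side knobs that influence pipeline recovery when a datanode
// in the write pipeline fails, as in the "Excluding datanode ..." lines above.
public final class PipelineRecoveryConfSketch {
  static Configuration clientConf() {
    Configuration conf = new Configuration();
    // Enable the replace-datanode-on-failure feature (it is on by default).
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // DEFAULT, ALWAYS or NEVER: when to request a replacement datanode.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // Best-effort keeps writing with the remaining nodes if replacement fails,
    // which is what a small test cluster typically falls back on.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}
```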
2024-12-03T04:59:23,298 WARN [Thread-1046 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK] 2024-12-03T04:59:23,300 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e4140b7a4d517ddb89f7b0d124e5ba92/recovered.edits/68.seqid, newMaxSeqId=68, maxSeqId=1 2024-12-03T04:59:23,300 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92. 2024-12-03T04:59:23,301 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e4140b7a4d517ddb89f7b0d124e5ba92: Waiting for close lock at 1733201963235Running coprocessor pre-close hooks at 1733201963235Disabling compacts and flushes for region at 1733201963235Disabling writes for close at 1733201963235Obtaining lock to block concurrent updates at 1733201963236 (+1 ms)Preparing flush snapshotting stores in e4140b7a4d517ddb89f7b0d124e5ba92 at 1733201963236Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92., syncing WAL and waiting on mvcc, flushsize=dataSize=6455, getHeapSize=7152, getOffHeapSize=0, getCellsCount=6 at 1733201963236Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92. at 1733201963237 (+1 ms)Flushing e4140b7a4d517ddb89f7b0d124e5ba92/info: creating writer at 1733201963237Flushing e4140b7a4d517ddb89f7b0d124e5ba92/info: appending metadata at 1733201963241 (+4 ms)Flushing e4140b7a4d517ddb89f7b0d124e5ba92/info: closing flushed file at 1733201963241Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e112add: reopening flushed file at 1733201963270 (+29 ms)Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for e4140b7a4d517ddb89f7b0d124e5ba92 in 43ms, sequenceid=65, compaction requested=false at 1733201963278 (+8 ms)Writing region close event to WAL at 1733201963295 (+17 ms)Running coprocessor post-close hooks at 1733201963300 (+5 ms)Closed at 1733201963300 2024-12-03T04:59:23,301 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733201926026.e4140b7a4d517ddb89f7b0d124e5ba92. 
2024-12-03T04:59:23,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34499 is added to blk_1073741900_1086 (size=7089) 2024-12-03T04:59:23,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741900_1086 (size=7089) 2024-12-03T04:59:23,304 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/hbase/meta/1588230740/.tmp/info/67144c4888cc45bd8149f607ef90a3eb 2024-12-03T04:59:23,325 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/hbase/meta/1588230740/.tmp/ns/f14a5b36cd7d4b4084c74daaafdcf371 is 43, key is default/ns:d/1733201925793/Put/seqid=0 2024-12-03T04:59:23,327 WARN [Thread-1054 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:23,327 WARN [Thread-1054 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741901_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK], DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK]) is bad. 2024-12-03T04:59:23,327 WARN [Thread-1054 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741901_1087 2024-12-03T04:59:23,328 WARN [Thread-1054 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45599,DS-1d3a0383-99f1-4153-9174-40e3212a865e,DISK] 2024-12-03T04:59:23,329 WARN [Thread-1054 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741902_1088 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:23,329 WARN [Thread-1054 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741902_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK], DatanodeInfoWithStorage[127.0.0.1:34499,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]) is bad. 2024-12-03T04:59:23,329 WARN [Thread-1054 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741902_1088 2024-12-03T04:59:23,330 WARN [Thread-1054 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK] 2024-12-03T04:59:23,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34499 is added to blk_1073741903_1089 (size=5153) 2024-12-03T04:59:23,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741903_1089 (size=5153) 2024-12-03T04:59:23,338 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/hbase/meta/1588230740/.tmp/ns/f14a5b36cd7d4b4084c74daaafdcf371 2024-12-03T04:59:23,358 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/hbase/meta/1588230740/.tmp/table/91c5f5c655be4cadad4942ea1e70baa0 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1733201926418/Put/seqid=0 2024-12-03T04:59:23,360 WARN [Thread-1060 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741904_1090 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T04:59:23,360 WARN [Thread-1060 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741904_1090 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK], DatanodeInfoWithStorage[127.0.0.1:35215,DS-cb18d10d-2d52-4122-8d00-16b74e559ba3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK]) is bad. 2024-12-03T04:59:23,360 WARN [Thread-1060 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741904_1090 2024-12-03T04:59:23,361 WARN [Thread-1060 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33573,DS-dcbe587d-f673-4048-a95b-9eff4dee3851,DISK] 2024-12-03T04:59:23,363 WARN [Thread-1060 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741905_1091 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40009 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:23,363 WARN [Thread-1060 {}] hdfs.DataStreamer(1731): Error Recovery for BP-851166150-172.17.0.3-1733201923677:blk_1073741905_1091 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34499,DS-f1b292ff-4c29-4e13-be8c-722756eca30c,DISK], DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK]) is bad. 2024-12-03T04:59:23,363 WARN [Thread-1060 {}] hdfs.DataStreamer(1850): Abandoning BP-851166150-172.17.0.3-1733201923677:blk_1073741905_1091 2024-12-03T04:59:23,363 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:48854 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741905_1091] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data4]'}, localName='127.0.0.1:34499', datanodeUuid='b8f66bf4-78e5-4a33-b801-cf0b5bb96189', xmitsInProgress=0}:Exception transferring block BP-851166150-172.17.0.3-1733201923677:blk_1073741905_1091 to mirror 127.0.0.1:40009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:23,364 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:48854 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741905_1091] {}] datanode.BlockReceiver(316): Block 1073741905 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-03T04:59:23,364 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2012536718_22 at /127.0.0.1:48854 [Receiving block BP-851166150-172.17.0.3-1733201923677:blk_1073741905_1091] {}] datanode.DataXceiver(331): 127.0.0.1:34499:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48854 dst: /127.0.0.1:34499 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T04:59:23,366 WARN [Thread-1060 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40009,DS-c1f4e767-cc62-467e-883b-672977a588e4,DISK] 2024-12-03T04:59:23,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34499 is added to blk_1073741906_1092 (size=5424) 2024-12-03T04:59:23,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741906_1092 (size=5424) 2024-12-03T04:59:23,380 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/hbase/meta/1588230740/.tmp/table/91c5f5c655be4cadad4942ea1e70baa0 2024-12-03T04:59:23,389 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/hbase/meta/1588230740/.tmp/info/67144c4888cc45bd8149f607ef90a3eb as hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/hbase/meta/1588230740/info/67144c4888cc45bd8149f607ef90a3eb 2024-12-03T04:59:23,400 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/hbase/meta/1588230740/info/67144c4888cc45bd8149f607ef90a3eb, entries=10, sequenceid=11, filesize=6.9 K 2024-12-03T04:59:23,402 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/hbase/meta/1588230740/.tmp/ns/f14a5b36cd7d4b4084c74daaafdcf371 as hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/hbase/meta/1588230740/ns/f14a5b36cd7d4b4084c74daaafdcf371 2024-12-03T04:59:23,409 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/hbase/meta/1588230740/ns/f14a5b36cd7d4b4084c74daaafdcf371, entries=2, sequenceid=11, filesize=5.0 K 2024-12-03T04:59:23,410 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/hbase/meta/1588230740/.tmp/table/91c5f5c655be4cadad4942ea1e70baa0 as hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/hbase/meta/1588230740/table/91c5f5c655be4cadad4942ea1e70baa0 2024-12-03T04:59:23,419 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/hbase/meta/1588230740/table/91c5f5c655be4cadad4942ea1e70baa0, entries=2, sequenceid=11, filesize=5.3 K 2024-12-03T04:59:23,420 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 182ms, sequenceid=11, 
compaction requested=false 2024-12-03T04:59:23,430 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-03T04:59:23,431 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T04:59:23,431 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T04:59:23,431 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733201963238Running coprocessor pre-close hooks at 1733201963238Disabling compacts and flushes for region at 1733201963238Disabling writes for close at 1733201963238Obtaining lock to block concurrent updates at 1733201963238Preparing flush snapshotting stores in 1588230740 at 1733201963238Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1733201963238Flushing stores of hbase:meta,,1.1588230740 at 1733201963266 (+28 ms)Flushing 1588230740/info: creating writer at 1733201963266Flushing 1588230740/info: appending metadata at 1733201963292 (+26 ms)Flushing 1588230740/info: closing flushed file at 1733201963292Flushing 1588230740/ns: creating writer at 1733201963310 (+18 ms)Flushing 1588230740/ns: appending metadata at 1733201963325 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1733201963325Flushing 1588230740/table: creating writer at 1733201963344 (+19 ms)Flushing 1588230740/table: appending metadata at 1733201963358 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733201963358Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a5eccf3: reopening flushed file at 1733201963387 (+29 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@59f135cd: reopening flushed file at 1733201963401 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b029245: reopening flushed file at 1733201963409 (+8 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 182ms, sequenceid=11, compaction requested=false at 1733201963420 (+11 ms)Writing region close event to WAL at 1733201963425 (+5 ms)Running coprocessor post-close hooks at 1733201963430 (+5 ms)Closed at 1733201963431 (+1 ms) 2024-12-03T04:59:23,431 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T04:59:23,438 INFO [RS:0;32beccd76b35:44203 {}] regionserver.HRegionServer(976): stopping server 32beccd76b35,44203,1733201924824; all regions closed. 
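[editor's note] The HRegionFileSystem(442) "Committing .../.tmp/info/... as .../info/..." entries above, followed by "Added ... entries=10, sequenceid=11", illustrate the flush commit pattern: the flusher writes the new HFile under the region's .tmp directory and then moves it into the store directory, so readers only ever observe complete files. A minimal sketch of that rename-based commit, assuming only the generic FileSystem API; the class, method, and overwrite check are invented for illustration:

```java
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch of the ".tmp then rename" commit seen in the
// "Committing ... as ..." lines above: a flushed file only becomes visible
// in the store directory once the rename succeeds.
public final class FlushCommitSketch {
  public static Path commitFlushedFile(FileSystem fs, Path tmpFile, Path storeDir)
      throws IOException {
    Path dst = new Path(storeDir, tmpFile.getName());
    // Fail loudly instead of silently overwriting an existing store file.
    if (fs.exists(dst)) {
      throw new IOException("Target already exists: " + dst);
    }
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + dst);
    }
    return dst;
  }
}
```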
2024-12-03T04:59:23,438 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:23,438 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:23,439 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:23,439 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:23,439 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:23,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741897_1082 (size=825) 2024-12-03T04:59:23,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34499 is added to blk_1073741897_1082 (size=825) 2024-12-03T04:59:23,978 INFO [regionserver/32beccd76b35:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-03T04:59:23,979 INFO [regionserver/32beccd76b35:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-03T04:59:23,983 INFO [regionserver/32beccd76b35:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T04:59:24,212 INFO [regionserver/32beccd76b35:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-03T04:59:24,212 INFO [regionserver/32beccd76b35:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-03T04:59:25,212 INFO [regionserver/32beccd76b35:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T04:59:26,295 INFO [master/32beccd76b35:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-03T04:59:26,295 INFO [master/32beccd76b35:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-03T04:59:26,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741832_1008 (size=32) 2024-12-03T04:59:26,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741836_1012 (size=76) 2024-12-03T04:59:26,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34499 is added to blk_1073741863_1046 (size=13591) 2024-12-03T04:59:27,240 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 after 4002ms 2024-12-03T04:59:27,267 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta after 4002ms 2024-12-03T04:59:27,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741828_1004 (size=1189) 2024-12-03T04:59:27,632 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4c97175 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-851166150-172.17.0.3-1733201923677:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:45599,null,null]) java.net.ConnectException: Call From 32beccd76b35/172.17.0.3 to localhost:44181 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-03T04:59:28,238 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-03T04:59:28,241 DEBUG [RS:1;32beccd76b35:38103 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/oldWALs 2024-12-03T04:59:28,241 INFO [RS:1;32beccd76b35:38103 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 32beccd76b35%2C38103%2C1733201925919:(num 1733201926132) 2024-12-03T04:59:28,241 DEBUG [RS:1;32beccd76b35:38103 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T04:59:28,241 INFO [RS:1;32beccd76b35:38103 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T04:59:28,241 INFO [RS:1;32beccd76b35:38103 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T04:59:28,241 INFO [RS:1;32beccd76b35:38103 {}] hbase.ChoreService(370): Chore service for: regionserver/32beccd76b35:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-03T04:59:28,241 INFO [RS:1;32beccd76b35:38103 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T04:59:28,241 INFO [RS:1;32beccd76b35:38103 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T04:59:28,241 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
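[editor's note] The AbstractFSWAL(2118) ERROR above ("We have waited 5 seconds but the close of async writer doesn't complete") names the knob that controls that wait, "hbase.wal.fshlog.wait.on.shutdown.seconds". A hedged sketch of raising it in the client/test configuration; the 30-second value is an arbitrary example, not a recommendation from this log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: lengthen the WAL writer close wait referenced by the
// AbstractFSWAL(2118) error above. 30 seconds is an arbitrary example value.
public final class WalShutdownWaitSketch {
  static Configuration withLongerWalShutdownWait() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
    return conf;
  }
}
```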
2024-12-03T04:59:28,242 INFO [RS:1;32beccd76b35:38103 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T04:59:28,242 INFO [RS:1;32beccd76b35:38103 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T04:59:28,242 INFO [RS:1;32beccd76b35:38103 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38103 2024-12-03T04:59:28,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38103-0x101517586e10002, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/32beccd76b35,38103,1733201925919 2024-12-03T04:59:28,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T04:59:28,244 INFO [RS:1;32beccd76b35:38103 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T04:59:28,246 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [32beccd76b35,38103,1733201925919] 2024-12-03T04:59:28,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor115.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:28,248 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/32beccd76b35,38103,1733201925919 already deleted, retry=false 2024-12-03T04:59:28,248 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 32beccd76b35,38103,1733201925919 expired; onlineServers=1 2024-12-03T04:59:28,301 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:28,316 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:28,316 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:28,316 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:28,316 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:28,317 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:28,324 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:28,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:28,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38103-0x101517586e10002, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T04:59:28,346 INFO [RS:1;32beccd76b35:38103 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T04:59:28,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38103-0x101517586e10002, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T04:59:28,346 INFO [RS:1;32beccd76b35:38103 {}] regionserver.HRegionServer(1031): Exiting; stopping=32beccd76b35,38103,1733201925919; zookeeper connection closed. 2024-12-03T04:59:28,347 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7d37e119 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7d37e119 2024-12-03T04:59:28,440 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-03T04:59:28,443 DEBUG [RS:0;32beccd76b35:44203 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/oldWALs 2024-12-03T04:59:28,444 INFO [RS:0;32beccd76b35:44203 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 32beccd76b35%2C44203%2C1733201924824.meta:.meta(num 1733201963239) 2024-12-03T04:59:28,444 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:28,444 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:28,444 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:28,445 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:28,445 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:28,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741884_1068 (size=15140) 2024-12-03T04:59:28,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34499 is added to blk_1073741884_1068 (size=15140) 2024-12-03T04:59:28,452 DEBUG [RS:0;32beccd76b35:44203 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/oldWALs 2024-12-03T04:59:28,452 INFO [RS:0;32beccd76b35:44203 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 32beccd76b35%2C44203%2C1733201924824:(num 1733201962963) 2024-12-03T04:59:28,452 DEBUG [RS:0;32beccd76b35:44203 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T04:59:28,452 INFO [RS:0;32beccd76b35:44203 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T04:59:28,452 INFO [RS:0;32beccd76b35:44203 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T04:59:28,453 INFO [RS:0;32beccd76b35:44203 {}] hbase.ChoreService(370): Chore service for: regionserver/32beccd76b35:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-03T04:59:28,453 INFO [RS:0;32beccd76b35:44203 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T04:59:28,453 INFO [RS:0;32beccd76b35:44203 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:44203 2024-12-03T04:59:28,453 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T04:59:28,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44203-0x101517586e10001, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/32beccd76b35,44203,1733201924824 2024-12-03T04:59:28,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T04:59:28,457 INFO [RS:0;32beccd76b35:44203 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T04:59:28,459 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [32beccd76b35,44203,1733201924824] 2024-12-03T04:59:28,461 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/32beccd76b35,44203,1733201924824 already deleted, retry=false 2024-12-03T04:59:28,461 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 32beccd76b35,44203,1733201924824 expired; onlineServers=0 2024-12-03T04:59:28,461 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '32beccd76b35,35531,1733201924718' ***** 2024-12-03T04:59:28,461 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T04:59:28,461 INFO [M:0;32beccd76b35:35531 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T04:59:28,461 INFO [M:0;32beccd76b35:35531 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T04:59:28,461 DEBUG [M:0;32beccd76b35:35531 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T04:59:28,462 DEBUG [M:0;32beccd76b35:35531 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T04:59:28,462 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-03T04:59:28,462 DEBUG [master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.large.0-1733201925076 {}] cleaner.HFileCleaner(306): Exit Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.large.0-1733201925076,5,FailOnTimeoutGroup] 2024-12-03T04:59:28,462 DEBUG [master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.small.0-1733201925076 {}] cleaner.HFileCleaner(306): Exit Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.small.0-1733201925076,5,FailOnTimeoutGroup] 2024-12-03T04:59:28,462 INFO [M:0;32beccd76b35:35531 {}] hbase.ChoreService(370): Chore service for: master/32beccd76b35:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T04:59:28,462 INFO [M:0;32beccd76b35:35531 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T04:59:28,462 DEBUG [M:0;32beccd76b35:35531 {}] master.HMaster(1795): Stopping service threads 2024-12-03T04:59:28,462 INFO [M:0;32beccd76b35:35531 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T04:59:28,462 INFO [M:0;32beccd76b35:35531 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T04:59:28,463 INFO [M:0;32beccd76b35:35531 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T04:59:28,463 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T04:59:28,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T04:59:28,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:59:28,464 DEBUG [M:0;32beccd76b35:35531 {}] zookeeper.ZKUtil(347): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-03T04:59:28,464 WARN [M:0;32beccd76b35:35531 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-03T04:59:28,465 INFO [M:0;32beccd76b35:35531 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/.lastflushedseqids 2024-12-03T04:59:28,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741907_1093 (size=130) 2024-12-03T04:59:28,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34499 is added to blk_1073741907_1093 (size=130) 2024-12-03T04:59:28,475 INFO [M:0;32beccd76b35:35531 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-03T04:59:28,475 INFO [M:0;32beccd76b35:35531 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T04:59:28,475 DEBUG [M:0;32beccd76b35:35531 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T04:59:28,476 INFO [M:0;32beccd76b35:35531 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T04:59:28,476 DEBUG [M:0;32beccd76b35:35531 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T04:59:28,476 DEBUG [M:0;32beccd76b35:35531 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T04:59:28,476 DEBUG [M:0;32beccd76b35:35531 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T04:59:28,476 INFO [M:0;32beccd76b35:35531 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-12-03T04:59:28,495 DEBUG [M:0;32beccd76b35:35531 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c51006285be4442e9dd8d08c10549202 is 82, key is hbase:meta,,1/info:regioninfo/1733201925764/Put/seqid=0 2024-12-03T04:59:28,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741908_1094 (size=5672) 2024-12-03T04:59:28,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34499 is added to blk_1073741908_1094 (size=5672) 2024-12-03T04:59:28,508 INFO [M:0;32beccd76b35:35531 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c51006285be4442e9dd8d08c10549202 2024-12-03T04:59:28,538 DEBUG [M:0;32beccd76b35:35531 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5f00db2b336847e2a18567c51501c35f is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733201926424/Put/seqid=0 2024-12-03T04:59:28,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34499 is added to blk_1073741909_1095 (size=6255) 2024-12-03T04:59:28,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741909_1095 (size=6255) 2024-12-03T04:59:28,545 INFO [M:0;32beccd76b35:35531 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5f00db2b336847e2a18567c51501c35f 2024-12-03T04:59:28,552 INFO [M:0;32beccd76b35:35531 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5f00db2b336847e2a18567c51501c35f 2024-12-03T04:59:28,560 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44203-0x101517586e10001, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T04:59:28,560 INFO [RS:0;32beccd76b35:44203 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T04:59:28,560 DEBUG [Time-limited test-EventThread 
{}] zookeeper.ZKWatcher(609): regionserver:44203-0x101517586e10001, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T04:59:28,560 INFO [RS:0;32beccd76b35:44203 {}] regionserver.HRegionServer(1031): Exiting; stopping=32beccd76b35,44203,1733201924824; zookeeper connection closed. 2024-12-03T04:59:28,560 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1d395049 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1d395049 2024-12-03T04:59:28,560 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-03T04:59:28,572 DEBUG [M:0;32beccd76b35:35531 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/23ca378d73914ac996b1bbd813d2faee is 69, key is 32beccd76b35,38103,1733201925919/rs:state/1733201925963/Put/seqid=0 2024-12-03T04:59:28,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34499 is added to blk_1073741910_1096 (size=5224) 2024-12-03T04:59:28,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741910_1096 (size=5224) 2024-12-03T04:59:28,578 INFO [M:0;32beccd76b35:35531 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/23ca378d73914ac996b1bbd813d2faee 2024-12-03T04:59:28,607 DEBUG [M:0;32beccd76b35:35531 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e7de57036d95499291b0742fe34eb3a1 is 52, key is load_balancer_on/state:d/1733201925902/Put/seqid=0 2024-12-03T04:59:28,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741911_1097 (size=5056) 2024-12-03T04:59:28,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34499 is added to blk_1073741911_1097 (size=5056) 2024-12-03T04:59:28,613 INFO [M:0;32beccd76b35:35531 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e7de57036d95499291b0742fe34eb3a1 2024-12-03T04:59:28,620 DEBUG [M:0;32beccd76b35:35531 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c51006285be4442e9dd8d08c10549202 as hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c51006285be4442e9dd8d08c10549202 2024-12-03T04:59:28,625 INFO [M:0;32beccd76b35:35531 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c51006285be4442e9dd8d08c10549202, entries=8, sequenceid=60, filesize=5.5 K 2024-12-03T04:59:28,626 DEBUG [M:0;32beccd76b35:35531 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5f00db2b336847e2a18567c51501c35f as hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5f00db2b336847e2a18567c51501c35f 2024-12-03T04:59:28,632 INFO [M:0;32beccd76b35:35531 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5f00db2b336847e2a18567c51501c35f 2024-12-03T04:59:28,632 INFO [M:0;32beccd76b35:35531 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5f00db2b336847e2a18567c51501c35f, entries=6, sequenceid=60, filesize=6.1 K 2024-12-03T04:59:28,633 DEBUG [M:0;32beccd76b35:35531 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/23ca378d73914ac996b1bbd813d2faee as hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/23ca378d73914ac996b1bbd813d2faee 2024-12-03T04:59:28,639 INFO [M:0;32beccd76b35:35531 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/23ca378d73914ac996b1bbd813d2faee, entries=2, sequenceid=60, filesize=5.1 K 2024-12-03T04:59:28,640 DEBUG [M:0;32beccd76b35:35531 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e7de57036d95499291b0742fe34eb3a1 as hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e7de57036d95499291b0742fe34eb3a1 2024-12-03T04:59:28,646 INFO [M:0;32beccd76b35:35531 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e7de57036d95499291b0742fe34eb3a1, entries=1, sequenceid=60, filesize=4.9 K 2024-12-03T04:59:28,647 INFO [M:0;32beccd76b35:35531 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 171ms, sequenceid=60, compaction requested=false 2024-12-03T04:59:28,649 INFO [M:0;32beccd76b35:35531 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T04:59:28,650 DEBUG [M:0;32beccd76b35:35531 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733201968475Disabling compacts and flushes for region at 1733201968475Disabling writes for close at 1733201968476 (+1 ms)Obtaining lock to block concurrent updates at 1733201968476Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733201968476Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1733201968476Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733201968477 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733201968477Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733201968494 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733201968494Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733201968517 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733201968537 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733201968537Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733201968552 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733201968571 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733201968571Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733201968584 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733201968606 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733201968606Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@130960f0: reopening flushed file at 1733201968619 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5acccf93: reopening flushed file at 1733201968626 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d771169: reopening flushed file at 1733201968632 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1fea2cf2: reopening flushed file at 1733201968640 (+8 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 171ms, sequenceid=60, compaction requested=false at 1733201968647 (+7 ms)Writing region close event to WAL at 1733201968649 (+2 ms)Closed at 1733201968649 2024-12-03T04:59:28,650 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:28,650 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:28,650 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:28,650 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:28,651 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:28,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34499 is added to blk_1073741882_1065 (size=1045) 2024-12-03T04:59:28,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35215 is added to blk_1073741882_1065 (size=1045) 2024-12-03T04:59:28,654 INFO [M:0;32beccd76b35:35531 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-03T04:59:28,654 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T04:59:28,654 INFO [M:0;32beccd76b35:35531 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:35531 2024-12-03T04:59:28,655 INFO [M:0;32beccd76b35:35531 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T04:59:28,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T04:59:28,757 INFO [M:0;32beccd76b35:35531 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T04:59:28,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35531-0x101517586e10000, quorum=127.0.0.1:50617, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T04:59:28,760 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7e3dea39{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:59:28,760 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@73c11417{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T04:59:28,761 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T04:59:28,761 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@31d8af0c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T04:59:28,761 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@14a36e87{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/hadoop.log.dir/,STOPPED} 2024-12-03T04:59:28,767 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-03T04:59:28,767 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T04:59:28,767 WARN [BP-851166150-172.17.0.3-1733201923677 heartbeating to localhost/127.0.0.1:38443 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T04:59:28,767 WARN [BP-851166150-172.17.0.3-1733201923677 heartbeating to localhost/127.0.0.1:38443 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-851166150-172.17.0.3-1733201923677 (Datanode Uuid b8f66bf4-78e5-4a33-b801-cf0b5bb96189) service to localhost/127.0.0.1:38443 2024-12-03T04:59:28,768 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7c13b1ce {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-851166150-172.17.0.3-1733201923677:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:45599,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:44181 , LocalHost:localPort 32beccd76b35/172.17.0.3:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-03T04:59:28,768 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data3/current/BP-851166150-172.17.0.3-1733201923677 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:59:28,768 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7c13b1ce {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-851166150-172.17.0.3-1733201923677:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:34499,null,null], DatanodeInfoWithStorage[127.0.0.1:45599,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-851166150-172.17.0.3-1733201923677 2024-12-03T04:59:28,768 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data4/current/BP-851166150-172.17.0.3-1733201923677 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:59:28,768 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7c13b1ce {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-851166150-172.17.0.3-1733201923677:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:34499,null,null]) java.io.IOException: No block pool offer service for bpid=BP-851166150-172.17.0.3-1733201923677 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T04:59:28,769 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7c13b1ce {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-851166150-172.17.0.3-1733201923677:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:45599,null,null]) java.io.IOException: No block pool offer service for bpid=BP-851166150-172.17.0.3-1733201923677 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:28,769 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T04:59:28,769 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7c13b1ce {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-851166150-172.17.0.3-1733201923677:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:34499,null,null], DatanodeInfoWithStorage[127.0.0.1:45599,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-851166150-172.17.0.3-1733201923677:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:34499,null,null], DatanodeInfoWithStorage[127.0.0.1:45599,null,null]] 2024-12-03T04:59:28,772 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@67ec96b7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:59:28,773 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@664e0b4e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T04:59:28,773 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T04:59:28,773 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@79eaed89{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T04:59:28,773 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5979cbd4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/hadoop.log.dir/,STOPPED} 2024-12-03T04:59:28,774 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T04:59:28,774 WARN [BP-851166150-172.17.0.3-1733201923677 heartbeating to localhost/127.0.0.1:38443 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T04:59:28,774 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T04:59:28,774 WARN [BP-851166150-172.17.0.3-1733201923677 heartbeating to localhost/127.0.0.1:38443 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-851166150-172.17.0.3-1733201923677 (Datanode Uuid 0b88c979-8655-42f1-8356-0f1c738e6cf7) service to localhost/127.0.0.1:38443 2024-12-03T04:59:28,775 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data7/current/BP-851166150-172.17.0.3-1733201923677 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:59:28,775 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/cluster_db4ad155-bfcb-fa87-e3b7-6a42724b38db/data/data8/current/BP-851166150-172.17.0.3-1733201923677 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:59:28,775 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T04:59:28,781 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b7ed0d6{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T04:59:28,781 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3a9c918{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T04:59:28,782 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T04:59:28,782 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a18dcd3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T04:59:28,782 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f3a2d0b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/hadoop.log.dir/,STOPPED} 2024-12-03T04:59:28,794 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-03T04:59:28,828 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T04:59:28,831 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-03T04:59:28,840 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 (was 78) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38443 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1647223992) connection to localhost/127.0.0.1:38443 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007fec8cbef840.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:38443 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:41733 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1647223992) connection to localhost/127.0.0.1:38443 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41733 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:38443 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1647223992) connection to localhost/127.0.0.1:38443 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38443 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1647223992) connection to localhost/127.0.0.1:38443 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38443 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38443 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38443 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007fec8cbef840.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 405) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=494 (was 367) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 12), AvailableMemoryMB=4819 (was 5000) 2024-12-03T04:59:28,847 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:28,848 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:28,848 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=494, ProcessCount=11, AvailableMemoryMB=4819 2024-12-03T04:59:28,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T04:59:28,848 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:28,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/hadoop.log.dir so I do NOT create it in target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12 2024-12-03T04:59:28,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/633f99b7-11c3-5b63-0b1f-0567af1a7a32/hadoop.tmp.dir so I do NOT create it in target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12 2024-12-03T04:59:28,849 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/cluster_d71912db-e3b1-7130-3cb8-411025cb1efd, deleteOnExit=true 2024-12-03T04:59:28,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-03T04:59:28,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/test.cache.data in system properties and HBase conf 2024-12-03T04:59:28,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T04:59:28,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/hadoop.log.dir in system properties and HBase conf 2024-12-03T04:59:28,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T04:59:28,849 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:28,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T04:59:28,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-03T04:59:28,849 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-03T04:59:28,849 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:28,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T04:59:28,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T04:59:28,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T04:59:28,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T04:59:28,850 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:28,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T04:59:28,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T04:59:28,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T04:59:28,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T04:59:28,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T04:59:28,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/nfs.dump.dir in system 
properties and HBase conf 2024-12-03T04:59:28,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/java.io.tmpdir in system properties and HBase conf 2024-12-03T04:59:28,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T04:59:28,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T04:59:28,850 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T04:59:28,854 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:28,855 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:28,855 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:28,857 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:28,863 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T04:59:28,926 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T04:59:28,930 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T04:59:28,931 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T04:59:28,931 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T04:59:28,932 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T04:59:28,932 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T04:59:28,933 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c4c555c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/hadoop.log.dir/,AVAILABLE} 2024-12-03T04:59:28,933 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7945c5a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T04:59:29,053 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@57f89277{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/java.io.tmpdir/jetty-localhost-39607-hadoop-hdfs-3_4_1-tests_jar-_-any-15108560242452248463/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T04:59:29,053 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2342942b{HTTP/1.1, (http/1.1)}{localhost:39607} 2024-12-03T04:59:29,053 INFO [Time-limited test {}] server.Server(415): Started @153216ms 2024-12-03T04:59:29,067 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T04:59:29,124 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T04:59:29,127 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T04:59:29,127 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T04:59:29,128 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T04:59:29,128 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T04:59:29,128 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b5cdfcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/hadoop.log.dir/,AVAILABLE} 2024-12-03T04:59:29,129 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15281d4e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T04:59:29,246 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@63be899e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/java.io.tmpdir/jetty-localhost-35277-hadoop-hdfs-3_4_1-tests_jar-_-any-10904988444479826039/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:59:29,247 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7f7e1003{HTTP/1.1, (http/1.1)}{localhost:35277} 2024-12-03T04:59:29,247 INFO [Time-limited test {}] server.Server(415): Started @153409ms 2024-12-03T04:59:29,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:29,249 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T04:59:29,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:29,299 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T04:59:29,310 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T04:59:29,311 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T04:59:29,311 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T04:59:29,311 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T04:59:29,312 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@631f73d8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/hadoop.log.dir/,AVAILABLE} 2024-12-03T04:59:29,312 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@484325f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T04:59:29,371 WARN [Thread-1181 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/cluster_d71912db-e3b1-7130-3cb8-411025cb1efd/data/data1/current/BP-329677405-172.17.0.3-1733201968871/current, will proceed with Du for space computation calculation, 2024-12-03T04:59:29,371 WARN [Thread-1182 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/cluster_d71912db-e3b1-7130-3cb8-411025cb1efd/data/data2/current/BP-329677405-172.17.0.3-1733201968871/current, will proceed with Du for space computation calculation, 2024-12-03T04:59:29,395 WARN [Thread-1160 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T04:59:29,398 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe25d2949120205c0 with lease ID 0x4f9e12cc7b00f377: Processing first storage report for DS-84045bae-cc71-4024-9178-1a28caebb905 from datanode DatanodeRegistration(127.0.0.1:42039, datanodeUuid=0f0f8f1a-6f08-4a74-9e72-e877de382ee4, infoPort=44933, infoSecurePort=0, ipcPort=37753, storageInfo=lv=-57;cid=testClusterID;nsid=1432857738;c=1733201968871) 2024-12-03T04:59:29,398 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe25d2949120205c0 with lease ID 0x4f9e12cc7b00f377: from storage DS-84045bae-cc71-4024-9178-1a28caebb905 node DatanodeRegistration(127.0.0.1:42039, datanodeUuid=0f0f8f1a-6f08-4a74-9e72-e877de382ee4, infoPort=44933, infoSecurePort=0, ipcPort=37753, storageInfo=lv=-57;cid=testClusterID;nsid=1432857738;c=1733201968871), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-03T04:59:29,398 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe25d2949120205c0 with lease ID 0x4f9e12cc7b00f377: Processing first storage report for DS-fb71848e-c666-4650-a644-d56c419475e4 from datanode DatanodeRegistration(127.0.0.1:42039, datanodeUuid=0f0f8f1a-6f08-4a74-9e72-e877de382ee4, infoPort=44933, infoSecurePort=0, ipcPort=37753, storageInfo=lv=-57;cid=testClusterID;nsid=1432857738;c=1733201968871) 2024-12-03T04:59:29,398 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe25d2949120205c0 with lease ID 0x4f9e12cc7b00f377: from storage DS-fb71848e-c666-4650-a644-d56c419475e4 node DatanodeRegistration(127.0.0.1:42039, datanodeUuid=0f0f8f1a-6f08-4a74-9e72-e877de382ee4, infoPort=44933, infoSecurePort=0, ipcPort=37753, storageInfo=lv=-57;cid=testClusterID;nsid=1432857738;c=1733201968871), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T04:59:29,435 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@784d9fd7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/java.io.tmpdir/jetty-localhost-40697-hadoop-hdfs-3_4_1-tests_jar-_-any-9775646823021837329/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:59:29,435 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6dd025c5{HTTP/1.1, (http/1.1)}{localhost:40697} 2024-12-03T04:59:29,436 INFO [Time-limited test {}] server.Server(415): Started @153598ms 2024-12-03T04:59:29,437 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
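[Editor's note] The DirectoryScanner warning above fires because dfs.datanode.directoryscan.throttle.limit.ms.per.sec was configured above 1000 ms/sec, so HDFS falls back to the default of -1 (throttling disabled). A minimal sketch of setting and reading that key on a plain Hadoop Configuration object, assuming only hadoop-common on the classpath; the class name and the value 1500 are illustrative, not taken from this test:

import org.apache.hadoop.conf.Configuration;

public class DirectoryScannerThrottleSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Values above 1000 ms/sec trigger the WARN seen in the log and are ignored by the DataNode.
    conf.setLong("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 1500L);
    // -1 (the default the DataNode falls back to) means the directory scanner runs unthrottled.
    long throttle = conf.getLong("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", -1L);
    System.out.println("configured throttle = " + throttle + " ms/sec");
  }
}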
2024-12-03T04:59:29,538 WARN [Thread-1207 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/cluster_d71912db-e3b1-7130-3cb8-411025cb1efd/data/data3/current/BP-329677405-172.17.0.3-1733201968871/current, will proceed with Du for space computation calculation, 2024-12-03T04:59:29,538 WARN [Thread-1208 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/cluster_d71912db-e3b1-7130-3cb8-411025cb1efd/data/data4/current/BP-329677405-172.17.0.3-1733201968871/current, will proceed with Du for space computation calculation, 2024-12-03T04:59:29,559 WARN [Thread-1196 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T04:59:29,563 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1140089e80805a3e with lease ID 0x4f9e12cc7b00f378: Processing first storage report for DS-ce57b09f-ac05-4fc1-af04-9b59bf15ac0f from datanode DatanodeRegistration(127.0.0.1:40961, datanodeUuid=437a725a-5891-4a4e-8fa8-92e4661703dc, infoPort=41091, infoSecurePort=0, ipcPort=40319, storageInfo=lv=-57;cid=testClusterID;nsid=1432857738;c=1733201968871) 2024-12-03T04:59:29,563 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1140089e80805a3e with lease ID 0x4f9e12cc7b00f378: from storage DS-ce57b09f-ac05-4fc1-af04-9b59bf15ac0f node DatanodeRegistration(127.0.0.1:40961, datanodeUuid=437a725a-5891-4a4e-8fa8-92e4661703dc, infoPort=41091, infoSecurePort=0, ipcPort=40319, storageInfo=lv=-57;cid=testClusterID;nsid=1432857738;c=1733201968871), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T04:59:29,563 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1140089e80805a3e with lease ID 0x4f9e12cc7b00f378: Processing first storage report for DS-d507e67d-8b43-42f5-8ab1-bb2d03c4e863 from datanode DatanodeRegistration(127.0.0.1:40961, datanodeUuid=437a725a-5891-4a4e-8fa8-92e4661703dc, infoPort=41091, infoSecurePort=0, ipcPort=40319, storageInfo=lv=-57;cid=testClusterID;nsid=1432857738;c=1733201968871) 2024-12-03T04:59:29,563 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1140089e80805a3e with lease ID 0x4f9e12cc7b00f378: from storage DS-d507e67d-8b43-42f5-8ab1-bb2d03c4e863 node DatanodeRegistration(127.0.0.1:40961, datanodeUuid=437a725a-5891-4a4e-8fa8-92e4661703dc, infoPort=41091, infoSecurePort=0, ipcPort=40319, storageInfo=lv=-57;cid=testClusterID;nsid=1432857738;c=1733201968871), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T04:59:29,566 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12 2024-12-03T04:59:29,569 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/cluster_d71912db-e3b1-7130-3cb8-411025cb1efd/zookeeper_0, clientPort=58503, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/cluster_d71912db-e3b1-7130-3cb8-411025cb1efd/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/cluster_d71912db-e3b1-7130-3cb8-411025cb1efd/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T04:59:29,573 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58503 2024-12-03T04:59:29,573 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:59:29,574 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:59:29,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40961 is added to blk_1073741825_1001 (size=7) 2024-12-03T04:59:29,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741825_1001 (size=7) 2024-12-03T04:59:29,587 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac with version=8 2024-12-03T04:59:29,587 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/hbase-staging 2024-12-03T04:59:29,590 INFO [Time-limited test {}] client.ConnectionUtils(128): master/32beccd76b35:0 server-side Connection retries=45 2024-12-03T04:59:29,590 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T04:59:29,590 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T04:59:29,590 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T04:59:29,590 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T04:59:29,590 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T04:59:29,590 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-03T04:59:29,590 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T04:59:29,591 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42965 2024-12-03T04:59:29,592 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42965 connecting to ZooKeeper ensemble=127.0.0.1:58503 2024-12-03T04:59:29,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:429650x0, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T04:59:29,600 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42965-0x1015176364a0000 connected 2024-12-03T04:59:29,628 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:59:29,630 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:59:29,633 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T04:59:29,633 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac, hbase.cluster.distributed=false 2024-12-03T04:59:29,635 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T04:59:29,636 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42965 2024-12-03T04:59:29,637 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42965 2024-12-03T04:59:29,637 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42965 2024-12-03T04:59:29,639 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42965 2024-12-03T04:59:29,639 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42965 2024-12-03T04:59:29,665 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/32beccd76b35:0 server-side Connection retries=45 2024-12-03T04:59:29,665 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T04:59:29,665 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T04:59:29,665 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T04:59:29,665 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T04:59:29,665 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T04:59:29,665 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T04:59:29,665 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T04:59:29,666 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40541 2024-12-03T04:59:29,668 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40541 connecting to ZooKeeper ensemble=127.0.0.1:58503 2024-12-03T04:59:29,669 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:59:29,671 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:59:29,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:405410x0, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T04:59:29,676 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40541-0x1015176364a0001 connected 2024-12-03T04:59:29,676 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40541-0x1015176364a0001, quorum=127.0.0.1:58503, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T04:59:29,676 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T04:59:29,677 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T04:59:29,677 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40541-0x1015176364a0001, quorum=127.0.0.1:58503, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T04:59:29,679 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40541-0x1015176364a0001, quorum=127.0.0.1:58503, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T04:59:29,679 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40541 2024-12-03T04:59:29,679 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40541 2024-12-03T04:59:29,680 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40541 2024-12-03T04:59:29,680 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40541 2024-12-03T04:59:29,680 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40541 
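[Editor's note] The ZKUtil entries above ("Set watcher on znode that does not yet exist, /hbase/running", "/hbase/master", "/hbase/acl") rely on standard ZooKeeper behaviour: exists() registers a watch even when the znode is absent, so the NodeCreated event for /hbase/master that follows can still be delivered. A minimal sketch of that pattern with the plain ZooKeeper client, assuming the ensemble address from this log; the watcher body is illustrative and not HBase's ZKWatcher:

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ExistsWatchSketch {
  public static void main(String[] args) throws Exception {
    // Session watcher just prints connection-state events.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:58503", 30000,
        event -> System.out.println("session event: " + event.getState()));

    Watcher watcher = (WatchedEvent event) ->
        System.out.println("event " + event.getType() + " on " + event.getPath());

    // exists() returns null when the znode is absent, but the watch is still registered,
    // so a later NodeCreated event (like the /hbase/master event in the log) is delivered.
    Stat stat = zk.exists("/hbase/running", watcher);
    System.out.println("/hbase/running " + (stat == null ? "does not exist yet" : "exists"));
    // In a real client the session stays open; closing it here would discard the watch.
  }
}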
2024-12-03T04:59:29,697 DEBUG [M:0;32beccd76b35:42965 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;32beccd76b35:42965 2024-12-03T04:59:29,697 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/32beccd76b35,42965,1733201969589 2024-12-03T04:59:29,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40541-0x1015176364a0001, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T04:59:29,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T04:59:29,700 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/32beccd76b35,42965,1733201969589 2024-12-03T04:59:29,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40541-0x1015176364a0001, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T04:59:29,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40541-0x1015176364a0001, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:59:29,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:59:29,704 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T04:59:29,705 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/32beccd76b35,42965,1733201969589 from backup master directory 2024-12-03T04:59:29,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/32beccd76b35,42965,1733201969589 2024-12-03T04:59:29,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40541-0x1015176364a0001, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T04:59:29,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T04:59:29,706 WARN [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-03T04:59:29,707 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=32beccd76b35,42965,1733201969589 2024-12-03T04:59:29,712 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/hbase.id] with ID: df9a2d2b-7bc6-4ed5-b1f1-90214757d1c1 2024-12-03T04:59:29,712 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/.tmp/hbase.id 2024-12-03T04:59:29,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40961 is added to blk_1073741826_1002 (size=42) 2024-12-03T04:59:29,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741826_1002 (size=42) 2024-12-03T04:59:29,728 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/.tmp/hbase.id]:[hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/hbase.id] 2024-12-03T04:59:29,747 INFO [master/32beccd76b35:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:59:29,747 INFO [master/32beccd76b35:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-03T04:59:29,749 INFO [master/32beccd76b35:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-12-03T04:59:29,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:59:29,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40541-0x1015176364a0001, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:59:29,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741827_1003 (size=196) 2024-12-03T04:59:29,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40961 is added to blk_1073741827_1003 (size=196) 2024-12-03T04:59:29,762 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T04:59:29,763 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T04:59:29,764 INFO [master/32beccd76b35:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T04:59:29,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40961 is added to blk_1073741828_1004 (size=1189) 2024-12-03T04:59:29,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741828_1004 (size=1189) 2024-12-03T04:59:29,775 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store 2024-12-03T04:59:29,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741829_1005 (size=34) 2024-12-03T04:59:29,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40961 is added to blk_1073741829_1005 (size=34) 2024-12-03T04:59:29,791 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T04:59:29,791 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T04:59:29,791 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T04:59:29,791 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T04:59:29,791 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T04:59:29,791 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T04:59:29,791 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T04:59:29,792 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733201969791Disabling compacts and flushes for region at 1733201969791Disabling writes for close at 1733201969791Writing region close event to WAL at 1733201969791Closed at 1733201969791 2024-12-03T04:59:29,792 WARN [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store/.initializing 2024-12-03T04:59:29,792 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/WALs/32beccd76b35,42965,1733201969589 2024-12-03T04:59:29,796 INFO [master/32beccd76b35:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32beccd76b35%2C42965%2C1733201969589, suffix=, logDir=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/WALs/32beccd76b35,42965,1733201969589, archiveDir=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/oldWALs, maxLogs=10 2024-12-03T04:59:29,796 INFO [master/32beccd76b35:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C42965%2C1733201969589.1733201969796 2024-12-03T04:59:29,804 INFO [master/32beccd76b35:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/WALs/32beccd76b35,42965,1733201969589/32beccd76b35%2C42965%2C1733201969589.1733201969796 2024-12-03T04:59:29,805 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44933:44933),(127.0.0.1/127.0.0.1:41091:41091)] 2024-12-03T04:59:29,821 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T04:59:29,821 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T04:59:29,822 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:59:29,822 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:59:29,823 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:59:29,825 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T04:59:29,825 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:59:29,825 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:59:29,825 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:59:29,829 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T04:59:29,829 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:59:29,829 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T04:59:29,829 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:59:29,831 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T04:59:29,831 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:59:29,831 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T04:59:29,831 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:59:29,833 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T04:59:29,833 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:59:29,833 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T04:59:29,833 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:59:29,834 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:59:29,835 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:59:29,837 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:59:29,837 DEBUG [master/32beccd76b35:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:59:29,837 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T04:59:29,839 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T04:59:29,843 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T04:59:29,844 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=854984, jitterRate=0.08716961741447449}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T04:59:29,845 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733201969822Initializing all the Stores at 1733201969823 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201969823Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733201969823Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733201969823Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733201969823Cleaning up temporary data from old regions at 1733201969837 (+14 ms)Region opened successfully at 1733201969845 (+8 ms) 2024-12-03T04:59:29,845 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T04:59:29,852 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7013c4ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=32beccd76b35/172.17.0.3:0 2024-12-03T04:59:29,853 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-03T04:59:29,853 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T04:59:29,853 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T04:59:29,853 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T04:59:29,854 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-03T04:59:29,855 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-03T04:59:29,855 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T04:59:29,867 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T04:59:29,868 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T04:59:29,870 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T04:59:29,870 INFO [master/32beccd76b35:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T04:59:29,871 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T04:59:29,873 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T04:59:29,873 INFO [master/32beccd76b35:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T04:59:29,878 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T04:59:29,879 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T04:59:29,881 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T04:59:29,883 DEBUG 
[master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T04:59:29,885 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T04:59:29,886 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T04:59:29,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T04:59:29,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40541-0x1015176364a0001, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T04:59:29,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:59:29,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40541-0x1015176364a0001, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:59:29,889 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=32beccd76b35,42965,1733201969589, sessionid=0x1015176364a0000, setting cluster-up flag (Was=false) 2024-12-03T04:59:29,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:59:29,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40541-0x1015176364a0001, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:59:29,902 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T04:59:29,903 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=32beccd76b35,42965,1733201969589 2024-12-03T04:59:29,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:59:29,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40541-0x1015176364a0001, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:59:29,912 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T04:59:29,913 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=32beccd76b35,42965,1733201969589 2024-12-03T04:59:29,914 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T04:59:29,916 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T04:59:29,916 INFO [master/32beccd76b35:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T04:59:29,916 INFO [master/32beccd76b35:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-03T04:59:29,917 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 32beccd76b35,42965,1733201969589 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T04:59:29,919 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T04:59:29,919 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T04:59:29,919 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T04:59:29,919 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T04:59:29,919 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/32beccd76b35:0, corePoolSize=10, maxPoolSize=10 2024-12-03T04:59:29,919 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:59:29,919 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/32beccd76b35:0, corePoolSize=2, maxPoolSize=2 2024-12-03T04:59:29,919 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/32beccd76b35:0, corePoolSize=1, 
maxPoolSize=1 2024-12-03T04:59:29,921 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T04:59:29,922 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T04:59:29,923 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:59:29,923 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T04:59:29,924 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733201999924 2024-12-03T04:59:29,924 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T04:59:29,924 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T04:59:29,924 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T04:59:29,924 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T04:59:29,924 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T04:59:29,924 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T04:59:29,925 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T04:59:29,925 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T04:59:29,925 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T04:59:29,925 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T04:59:29,926 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T04:59:29,926 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T04:59:29,926 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.large.0-1733201969926,5,FailOnTimeoutGroup] 2024-12-03T04:59:29,926 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.small.0-1733201969926,5,FailOnTimeoutGroup] 2024-12-03T04:59:29,926 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T04:59:29,926 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T04:59:29,926 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T04:59:29,926 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-03T04:59:29,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40961 is added to blk_1073741831_1007 (size=1321) 2024-12-03T04:59:29,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741831_1007 (size=1321) 2024-12-03T04:59:29,933 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T04:59:29,933 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac 2024-12-03T04:59:29,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741832_1008 (size=32) 2024-12-03T04:59:29,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40961 is added to blk_1073741832_1008 (size=32) 2024-12-03T04:59:29,942 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T04:59:29,943 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T04:59:29,945 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T04:59:29,945 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:59:29,946 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:59:29,946 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T04:59:29,947 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T04:59:29,947 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:59:29,948 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:59:29,948 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T04:59:29,949 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T04:59:29,949 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:59:29,950 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:59:29,950 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T04:59:29,951 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T04:59:29,951 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:59:29,952 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:59:29,952 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T04:59:29,953 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/hbase/meta/1588230740 2024-12-03T04:59:29,953 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/hbase/meta/1588230740 2024-12-03T04:59:29,955 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T04:59:29,955 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T04:59:29,956 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-03T04:59:29,957 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T04:59:29,960 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T04:59:29,960 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=850538, jitterRate=0.08151555061340332}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T04:59:29,961 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733201969942Initializing all the Stores at 1733201969943 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201969943Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201969943Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733201969943Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201969943Cleaning up temporary data from old regions at 1733201969955 (+12 ms)Region opened successfully at 1733201969961 (+6 ms) 2024-12-03T04:59:29,961 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T04:59:29,961 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T04:59:29,961 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T04:59:29,961 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T04:59:29,961 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T04:59:29,965 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T04:59:29,965 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733201969961Disabling compacts and flushes for region at 1733201969961Disabling writes for close at 1733201969961Writing region close 
event to WAL at 1733201969965 (+4 ms)Closed at 1733201969965 2024-12-03T04:59:29,967 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T04:59:29,967 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T04:59:29,967 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T04:59:29,968 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T04:59:29,970 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T04:59:29,982 INFO [RS:0;32beccd76b35:40541 {}] regionserver.HRegionServer(746): ClusterId : df9a2d2b-7bc6-4ed5-b1f1-90214757d1c1 2024-12-03T04:59:29,982 DEBUG [RS:0;32beccd76b35:40541 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T04:59:29,984 DEBUG [RS:0;32beccd76b35:40541 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T04:59:29,984 DEBUG [RS:0;32beccd76b35:40541 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T04:59:29,986 DEBUG [RS:0;32beccd76b35:40541 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T04:59:29,987 DEBUG [RS:0;32beccd76b35:40541 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e15d525, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=32beccd76b35/172.17.0.3:0 2024-12-03T04:59:30,000 DEBUG [RS:0;32beccd76b35:40541 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;32beccd76b35:40541 2024-12-03T04:59:30,000 INFO [RS:0;32beccd76b35:40541 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T04:59:30,000 INFO [RS:0;32beccd76b35:40541 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T04:59:30,000 DEBUG [RS:0;32beccd76b35:40541 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-03T04:59:30,001 INFO [RS:0;32beccd76b35:40541 {}] regionserver.HRegionServer(2659): reportForDuty to master=32beccd76b35,42965,1733201969589 with port=40541, startcode=1733201969664 2024-12-03T04:59:30,001 DEBUG [RS:0;32beccd76b35:40541 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T04:59:30,005 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35643, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T04:59:30,005 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42965 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 32beccd76b35,40541,1733201969664 2024-12-03T04:59:30,005 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42965 {}] master.ServerManager(517): Registering regionserver=32beccd76b35,40541,1733201969664 2024-12-03T04:59:30,007 DEBUG [RS:0;32beccd76b35:40541 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac 2024-12-03T04:59:30,007 DEBUG [RS:0;32beccd76b35:40541 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46177 2024-12-03T04:59:30,007 DEBUG [RS:0;32beccd76b35:40541 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T04:59:30,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T04:59:30,010 DEBUG [RS:0;32beccd76b35:40541 {}] zookeeper.ZKUtil(111): regionserver:40541-0x1015176364a0001, quorum=127.0.0.1:58503, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/32beccd76b35,40541,1733201969664 2024-12-03T04:59:30,010 WARN [RS:0;32beccd76b35:40541 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T04:59:30,010 INFO [RS:0;32beccd76b35:40541 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T04:59:30,010 DEBUG [RS:0;32beccd76b35:40541 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664 2024-12-03T04:59:30,014 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [32beccd76b35,40541,1733201969664] 2024-12-03T04:59:30,016 INFO [RS:0;32beccd76b35:40541 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T04:59:30,025 INFO [RS:0;32beccd76b35:40541 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T04:59:30,026 INFO [RS:0;32beccd76b35:40541 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T04:59:30,026 INFO [RS:0;32beccd76b35:40541 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-03T04:59:30,028 INFO [RS:0;32beccd76b35:40541 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T04:59:30,029 INFO [RS:0;32beccd76b35:40541 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T04:59:30,029 INFO [RS:0;32beccd76b35:40541 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T04:59:30,029 DEBUG [RS:0;32beccd76b35:40541 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:59:30,029 DEBUG [RS:0;32beccd76b35:40541 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:59:30,029 DEBUG [RS:0;32beccd76b35:40541 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:59:30,029 DEBUG [RS:0;32beccd76b35:40541 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:59:30,030 DEBUG [RS:0;32beccd76b35:40541 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:59:30,030 DEBUG [RS:0;32beccd76b35:40541 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/32beccd76b35:0, corePoolSize=2, maxPoolSize=2 2024-12-03T04:59:30,030 DEBUG [RS:0;32beccd76b35:40541 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:59:30,030 DEBUG [RS:0;32beccd76b35:40541 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:59:30,030 DEBUG [RS:0;32beccd76b35:40541 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:59:30,030 DEBUG [RS:0;32beccd76b35:40541 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:59:30,030 DEBUG [RS:0;32beccd76b35:40541 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:59:30,030 DEBUG [RS:0;32beccd76b35:40541 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T04:59:30,030 DEBUG [RS:0;32beccd76b35:40541 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/32beccd76b35:0, corePoolSize=3, maxPoolSize=3 2024-12-03T04:59:30,030 DEBUG [RS:0;32beccd76b35:40541 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0, corePoolSize=3, maxPoolSize=3 2024-12-03T04:59:30,032 INFO [RS:0;32beccd76b35:40541 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
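[editor's note, illustrative sketch, not part of the captured log] The ChoreService entries above (CompactionChecker, CompactedHFilesCleaner, and so on) all go through the same scheduling API. Below is a minimal sketch of defining and registering a ScheduledChore, assuming the public ChoreService/ScheduledChore/Stoppable classes; the chore body is hypothetical.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // Period in milliseconds, like the "period=1000, unit=MILLISECONDS" entries above.
        ScheduledChore chore = new ScheduledChore("ExampleChecker", stopper, 1000) {
          @Override protected void chore() {
            // Hypothetical periodic work; the real chores run their checks here.
            System.out.println("chore tick");
          }
        };
        ChoreService service = new ChoreService("example");
        service.scheduleChore(chore);
        // ... later: service.shutdown();
      }
    }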
2024-12-03T04:59:30,033 INFO [RS:0;32beccd76b35:40541 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T04:59:30,033 INFO [RS:0;32beccd76b35:40541 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T04:59:30,033 INFO [RS:0;32beccd76b35:40541 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T04:59:30,033 INFO [RS:0;32beccd76b35:40541 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T04:59:30,033 INFO [RS:0;32beccd76b35:40541 {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,40541,1733201969664-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T04:59:30,052 INFO [RS:0;32beccd76b35:40541 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T04:59:30,052 INFO [RS:0;32beccd76b35:40541 {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,40541,1733201969664-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T04:59:30,052 INFO [RS:0;32beccd76b35:40541 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T04:59:30,052 INFO [RS:0;32beccd76b35:40541 {}] regionserver.Replication(171): 32beccd76b35,40541,1733201969664 started 2024-12-03T04:59:30,070 INFO [RS:0;32beccd76b35:40541 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T04:59:30,070 INFO [RS:0;32beccd76b35:40541 {}] regionserver.HRegionServer(1482): Serving as 32beccd76b35,40541,1733201969664, RpcServer on 32beccd76b35/172.17.0.3:40541, sessionid=0x1015176364a0001 2024-12-03T04:59:30,070 DEBUG [RS:0;32beccd76b35:40541 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T04:59:30,070 DEBUG [RS:0;32beccd76b35:40541 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 32beccd76b35,40541,1733201969664 2024-12-03T04:59:30,071 DEBUG [RS:0;32beccd76b35:40541 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '32beccd76b35,40541,1733201969664' 2024-12-03T04:59:30,071 DEBUG [RS:0;32beccd76b35:40541 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T04:59:30,071 DEBUG [RS:0;32beccd76b35:40541 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T04:59:30,072 DEBUG [RS:0;32beccd76b35:40541 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T04:59:30,072 DEBUG [RS:0;32beccd76b35:40541 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T04:59:30,072 DEBUG [RS:0;32beccd76b35:40541 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 32beccd76b35,40541,1733201969664 2024-12-03T04:59:30,072 DEBUG [RS:0;32beccd76b35:40541 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '32beccd76b35,40541,1733201969664' 2024-12-03T04:59:30,072 DEBUG [RS:0;32beccd76b35:40541 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T04:59:30,072 DEBUG 
[RS:0;32beccd76b35:40541 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T04:59:30,072 DEBUG [RS:0;32beccd76b35:40541 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T04:59:30,072 INFO [RS:0;32beccd76b35:40541 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T04:59:30,072 INFO [RS:0;32beccd76b35:40541 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T04:59:30,120 WARN [32beccd76b35:42965 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-03T04:59:30,175 INFO [RS:0;32beccd76b35:40541 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32beccd76b35%2C40541%2C1733201969664, suffix=, logDir=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664, archiveDir=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/oldWALs, maxLogs=32 2024-12-03T04:59:30,176 INFO [RS:0;32beccd76b35:40541 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C40541%2C1733201969664.1733201970175 2024-12-03T04:59:30,181 INFO [RS:0;32beccd76b35:40541 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201970175 2024-12-03T04:59:30,182 DEBUG [RS:0;32beccd76b35:40541 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44933:44933),(127.0.0.1/127.0.0.1:41091:41091)] 2024-12-03T04:59:30,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:30,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T04:59:30,370 DEBUG [32beccd76b35:42965 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-03T04:59:30,371 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=32beccd76b35,40541,1733201969664 2024-12-03T04:59:30,372 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 32beccd76b35,40541,1733201969664, state=OPENING 2024-12-03T04:59:30,375 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T04:59:30,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40541-0x1015176364a0001, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:59:30,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T04:59:30,378 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T04:59:30,378 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=32beccd76b35,40541,1733201969664}] 2024-12-03T04:59:30,378 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T04:59:30,379 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T04:59:30,537 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T04:59:30,539 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53263, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T04:59:30,543 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T04:59:30,543 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T04:59:30,545 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32beccd76b35%2C40541%2C1733201969664.meta, suffix=.meta, logDir=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664, archiveDir=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/oldWALs, maxLogs=32 2024-12-03T04:59:30,545 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C40541%2C1733201969664.meta.1733201970545.meta 2024-12-03T04:59:30,551 INFO 
[RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.meta.1733201970545.meta 2024-12-03T04:59:30,552 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41091:41091),(127.0.0.1/127.0.0.1:44933:44933)] 2024-12-03T04:59:30,552 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T04:59:30,553 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T04:59:30,553 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T04:59:30,553 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-03T04:59:30,553 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T04:59:30,553 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T04:59:30,553 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T04:59:30,553 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T04:59:30,554 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T04:59:30,555 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T04:59:30,555 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:59:30,556 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:59:30,556 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T04:59:30,556 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T04:59:30,557 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:59:30,557 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:59:30,557 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T04:59:30,558 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T04:59:30,558 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:59:30,558 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:59:30,558 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T04:59:30,559 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T04:59:30,559 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:59:30,559 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T04:59:30,560 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T04:59:30,560 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/hbase/meta/1588230740 2024-12-03T04:59:30,562 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/hbase/meta/1588230740 2024-12-03T04:59:30,563 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T04:59:30,563 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T04:59:30,564 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
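[editor's note, illustrative sketch, not part of the captured log] The StoreOpener entries above open hbase:meta with its four column families (info, ns, rep_barrier, table). The following client-side sketch scans the 'info' family of hbase:meta, assuming a reachable cluster; nothing here comes from the test itself beyond the family names.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaScanSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          // Only the 'info' family; 'ns', 'rep_barrier' and 'table' are the other families above.
          Scan scan = new Scan().addFamily(Bytes.toBytes("info"));
          try (ResultScanner scanner = meta.getScanner(scan)) {
            for (Result row : scanner) {
              System.out.println(Bytes.toStringBinary(row.getRow()));
            }
          }
        }
      }
    }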
2024-12-03T04:59:30,566 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T04:59:30,567 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=760716, jitterRate=-0.03270027041435242}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T04:59:30,567 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T04:59:30,567 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733201970553Writing region info on filesystem at 1733201970553Initializing all the Stores at 1733201970554 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201970554Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201970554Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733201970554Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733201970554Cleaning up temporary data from old regions at 1733201970563 (+9 ms)Running coprocessor post-open hooks at 1733201970567 (+4 ms)Region opened successfully at 1733201970567 2024-12-03T04:59:30,568 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733201970537 2024-12-03T04:59:30,571 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T04:59:30,571 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T04:59:30,572 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=32beccd76b35,40541,1733201969664 2024-12-03T04:59:30,573 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 32beccd76b35,40541,1733201969664, state=OPEN 2024-12-03T04:59:30,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T04:59:30,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40541-0x1015176364a0001, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T04:59:30,580 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=32beccd76b35,40541,1733201969664 2024-12-03T04:59:30,580 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T04:59:30,580 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T04:59:30,584 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T04:59:30,584 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=32beccd76b35,40541,1733201969664 in 202 msec 2024-12-03T04:59:30,587 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T04:59:30,588 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 617 msec 2024-12-03T04:59:30,588 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T04:59:30,588 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T04:59:30,590 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T04:59:30,590 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=32beccd76b35,40541,1733201969664, seqNum=-1] 2024-12-03T04:59:30,590 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T04:59:30,592 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56881, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T04:59:30,597 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 681 msec 2024-12-03T04:59:30,597 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733201970597, completionTime=-1 2024-12-03T04:59:30,598 INFO 
[master/32beccd76b35:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-03T04:59:30,598 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-03T04:59:30,600 INFO [master/32beccd76b35:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-03T04:59:30,600 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733202030600 2024-12-03T04:59:30,600 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733202090600 2024-12-03T04:59:30,600 INFO [master/32beccd76b35:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-03T04:59:30,601 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,42965,1733201969589-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T04:59:30,601 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,42965,1733201969589-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T04:59:30,601 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,42965,1733201969589-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T04:59:30,601 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-32beccd76b35:42965, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T04:59:30,601 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T04:59:30,602 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-03T04:59:30,603 DEBUG [master/32beccd76b35:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T04:59:30,608 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.901sec 2024-12-03T04:59:30,608 INFO [master/32beccd76b35:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T04:59:30,608 INFO [master/32beccd76b35:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T04:59:30,608 INFO [master/32beccd76b35:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T04:59:30,608 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
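[editor's note, illustrative sketch, not part of the captured log] Once the master logs "Master has completed initialization", the cluster is serving. A small sketch of confirming that from a client and listing the single live regionserver through the standard Admin/ClusterMetrics API; the connection details are assumed, not taken from the log.

    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterStatusSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          System.out.println("active master: " + metrics.getMasterName());
          // The log above reports exactly one registered regionserver.
          for (ServerName rs : metrics.getLiveServerMetrics().keySet()) {
            System.out.println("live regionserver: " + rs);
          }
        }
      }
    }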
2024-12-03T04:59:30,608 INFO [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T04:59:30,608 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,42965,1733201969589-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T04:59:30,608 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,42965,1733201969589-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T04:59:30,615 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T04:59:30,615 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T04:59:30,615 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,42965,1733201969589-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T04:59:30,685 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5dd5f98b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T04:59:30,685 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 32beccd76b35,42965,-1 for getting cluster id 2024-12-03T04:59:30,685 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T04:59:30,687 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'df9a2d2b-7bc6-4ed5-b1f1-90214757d1c1' 2024-12-03T04:59:30,687 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T04:59:30,687 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "df9a2d2b-7bc6-4ed5-b1f1-90214757d1c1" 2024-12-03T04:59:30,688 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@aa55154, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T04:59:30,688 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [32beccd76b35,42965,-1] 2024-12-03T04:59:30,688 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T04:59:30,689 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T04:59:30,690 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54424, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T04:59:30,691 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@107dbc21, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T04:59:30,691 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T04:59:30,693 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=32beccd76b35,40541,1733201969664, seqNum=-1] 2024-12-03T04:59:30,693 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T04:59:30,699 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53898, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T04:59:30,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=32beccd76b35,42965,1733201969589 2024-12-03T04:59:30,701 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T04:59:30,706 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-03T04:59:30,706 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-12-03T04:59:30,706 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-12-03T04:59:30,707 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-03T04:59:30,708 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 32beccd76b35,42965,1733201969589 2024-12-03T04:59:30,708 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@63bcb61 2024-12-03T04:59:30,708 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T04:59:30,710 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54440, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T04:59:30,711 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42965 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-03T04:59:30,711 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42965 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
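[editor's note, illustrative sketch, not part of the captured log] The two TableDescriptorChecker warnings above fire because the test deliberately sets MAX_FILESIZE=786432 and MEMSTORE_FLUSHSIZE=8192, far below the usual sanity thresholds. A hedged sketch of a descriptor that reproduces those values follows; it is not the test's own code, only an illustration of the public builder API.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SmallTableDescriptorSketch {
      public static TableDescriptor build() {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1).build())
            // Values small enough to trigger the "too small" warnings seen above.
            .setMaxFileSize(786432L)
            .setMemStoreFlushSize(8192L)
            .build();
      }
    }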
2024-12-03T04:59:30,711 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42965 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T04:59:30,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42965 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-03T04:59:30,715 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T04:59:30,715 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:59:30,716 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42965 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-12-03T04:59:30,717 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T04:59:30,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42965 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T04:59:30,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741835_1011 (size=395) 2024-12-03T04:59:30,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40961 is added to blk_1073741835_1011 (size=395) 2024-12-03T04:59:30,758 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 926fee2387dd4e18f8e458c63fad081a, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac 2024-12-03T04:59:30,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42039 is added to blk_1073741836_1012 (size=78) 2024-12-03T04:59:30,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40961 is added to blk_1073741836_1012 (size=78) 2024-12-03T04:59:30,810 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T04:59:30,810 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 926fee2387dd4e18f8e458c63fad081a, disabling compactions & flushes 2024-12-03T04:59:30,810 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a. 2024-12-03T04:59:30,810 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a. 2024-12-03T04:59:30,810 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a. after waiting 0 ms 2024-12-03T04:59:30,810 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a. 2024-12-03T04:59:30,810 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a. 2024-12-03T04:59:30,810 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 926fee2387dd4e18f8e458c63fad081a: Waiting for close lock at 1733201970810Disabling compacts and flushes for region at 1733201970810Disabling writes for close at 1733201970810Writing region close event to WAL at 1733201970810Closed at 1733201970810 2024-12-03T04:59:30,813 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T04:59:30,814 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733201970813"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733201970813"}]},"ts":"1733201970813"} 2024-12-03T04:59:30,826 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
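[editor's note, illustrative sketch, not part of the captured log] The MetaTableAccessor Put above writes the new region's info:regioninfo and info:state columns into hbase:meta. The sketch below reads that same catalog row back from a client; the row key is copied from the Put shown above, and the qualifier names are the standard catalog columns.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaRowSketch {
      public static void main(String[] args) throws Exception {
        byte[] row = Bytes.toBytes(
            "TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a.");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          Result r = meta.get(new Get(row).addFamily(Bytes.toBytes("info")));
          System.out.println("state = "
              + Bytes.toString(r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"))));
        }
      }
    }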
2024-12-03T04:59:30,840 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T04:59:30,840 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733201970840"}]},"ts":"1733201970840"} 2024-12-03T04:59:30,843 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-03T04:59:30,844 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=926fee2387dd4e18f8e458c63fad081a, ASSIGN}] 2024-12-03T04:59:30,851 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=926fee2387dd4e18f8e458c63fad081a, ASSIGN 2024-12-03T04:59:30,854 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=926fee2387dd4e18f8e458c63fad081a, ASSIGN; state=OFFLINE, location=32beccd76b35,40541,1733201969664; forceNewPlan=false, retain=false 2024-12-03T04:59:31,005 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=926fee2387dd4e18f8e458c63fad081a, regionState=OPENING, regionLocation=32beccd76b35,40541,1733201969664 2024-12-03T04:59:31,008 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=926fee2387dd4e18f8e458c63fad081a, ASSIGN because future has completed 2024-12-03T04:59:31,009 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 926fee2387dd4e18f8e458c63fad081a, server=32beccd76b35,40541,1733201969664}] 2024-12-03T04:59:31,170 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a. 
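[editor's note, illustrative sketch, not part of the captured log] The assignment above ends with OpenRegionProcedure dispatching the new region to 32beccd76b35,40541. A minimal sketch of asking where that region landed from a client, using the RegionLocator API; the connection setup and probe row are assumed.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionLocationSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(table)) {
          // The table has a single region covering the whole key space, so any probe row works.
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row"), true);
          System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
        }
      }
    }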
2024-12-03T04:59:31,170 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 926fee2387dd4e18f8e458c63fad081a, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a.', STARTKEY => '', ENDKEY => ''} 2024-12-03T04:59:31,171 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 926fee2387dd4e18f8e458c63fad081a 2024-12-03T04:59:31,171 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T04:59:31,171 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 926fee2387dd4e18f8e458c63fad081a 2024-12-03T04:59:31,171 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 926fee2387dd4e18f8e458c63fad081a 2024-12-03T04:59:31,173 INFO [StoreOpener-926fee2387dd4e18f8e458c63fad081a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 926fee2387dd4e18f8e458c63fad081a 2024-12-03T04:59:31,175 INFO [StoreOpener-926fee2387dd4e18f8e458c63fad081a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 926fee2387dd4e18f8e458c63fad081a columnFamilyName info 2024-12-03T04:59:31,175 DEBUG [StoreOpener-926fee2387dd4e18f8e458c63fad081a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T04:59:31,175 INFO [StoreOpener-926fee2387dd4e18f8e458c63fad081a-1 {}] regionserver.HStore(327): Store=926fee2387dd4e18f8e458c63fad081a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T04:59:31,176 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 926fee2387dd4e18f8e458c63fad081a 2024-12-03T04:59:31,177 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/default/TestLogRolling-testLogRollOnPipelineRestart/926fee2387dd4e18f8e458c63fad081a 2024-12-03T04:59:31,177 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/default/TestLogRolling-testLogRollOnPipelineRestart/926fee2387dd4e18f8e458c63fad081a 2024-12-03T04:59:31,178 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 926fee2387dd4e18f8e458c63fad081a 2024-12-03T04:59:31,178 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 926fee2387dd4e18f8e458c63fad081a 2024-12-03T04:59:31,180 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 926fee2387dd4e18f8e458c63fad081a 2024-12-03T04:59:31,187 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/default/TestLogRolling-testLogRollOnPipelineRestart/926fee2387dd4e18f8e458c63fad081a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T04:59:31,187 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 926fee2387dd4e18f8e458c63fad081a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=806272, jitterRate=0.025228142738342285}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T04:59:31,187 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 926fee2387dd4e18f8e458c63fad081a 2024-12-03T04:59:31,189 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 926fee2387dd4e18f8e458c63fad081a: Running coprocessor pre-open hook at 1733201971171Writing region info on filesystem at 1733201971171Initializing all the Stores at 1733201971172 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733201971172Cleaning up temporary data from old regions at 1733201971178 (+6 ms)Running coprocessor post-open hooks at 1733201971187 (+9 ms)Region opened successfully at 1733201971188 (+1 ms) 2024-12-03T04:59:31,190 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a., pid=6, masterSystemTime=1733201971163 2024-12-03T04:59:31,194 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a. 2024-12-03T04:59:31,194 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a. 2024-12-03T04:59:31,195 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=926fee2387dd4e18f8e458c63fad081a, regionState=OPEN, openSeqNum=2, regionLocation=32beccd76b35,40541,1733201969664 2024-12-03T04:59:31,198 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 926fee2387dd4e18f8e458c63fad081a, server=32beccd76b35,40541,1733201969664 because future has completed 2024-12-03T04:59:31,204 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-03T04:59:31,204 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 926fee2387dd4e18f8e458c63fad081a, server=32beccd76b35,40541,1733201969664 in 192 msec 2024-12-03T04:59:31,214 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-03T04:59:31,214 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=926fee2387dd4e18f8e458c63fad081a, ASSIGN in 360 msec 2024-12-03T04:59:31,216 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T04:59:31,216 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733201971216"}]},"ts":"1733201971216"} 2024-12-03T04:59:31,219 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-03T04:59:31,221 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T04:59:31,223 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 509 msec 2024-12-03T04:59:31,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:31,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:32,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:32,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:32,833 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-03T04:59:32,833 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-12-03T04:59:32,834 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-03T04:59:33,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:33,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:34,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:34,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:35,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:35,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:36,056 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T04:59:36,099 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:36,100 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:36,100 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:36,101 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:36,102 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:36,102 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:36,106 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:36,106 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:36,107 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:36,110 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T04:59:36,132 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-03T04:59:36,133 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-12-03T04:59:36,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:36,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:37,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:37,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:38,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:38,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:39,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:39,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:40,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:40,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:40,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42965 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T04:59:40,760 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-12-03T04:59:40,760 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-12-03T04:59:40,769 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-12-03T04:59:40,769 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a. 2024-12-03T04:59:40,774 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a., hostname=32beccd76b35,40541,1733201969664, seqNum=2] 2024-12-03T04:59:41,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:41,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:42,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:42,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:42,778 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201970175 2024-12-03T04:59:42,781 WARN [ResponseProcessor for block BP-329677405-172.17.0.3-1733201968871:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-329677405-172.17.0.3-1733201968871:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-329677405-172.17.0.3-1733201968871:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:40961,DS-ce57b09f-ac05-4fc1-af04-9b59bf15ac0f,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:42,781 WARN [ResponseProcessor for block BP-329677405-172.17.0.3-1733201968871:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-329677405-172.17.0.3-1733201968871:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:42,781 WARN [DataStreamer for file /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201970175 block BP-329677405-172.17.0.3-1733201968871:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-329677405-172.17.0.3-1733201968871:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42039,DS-84045bae-cc71-4024-9178-1a28caebb905,DISK], DatanodeInfoWithStorage[127.0.0.1:40961,DS-ce57b09f-ac05-4fc1-af04-9b59bf15ac0f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40961,DS-ce57b09f-ac05-4fc1-af04-9b59bf15ac0f,DISK]) is bad. 2024-12-03T04:59:42,781 WARN [DataStreamer for file /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.meta.1733201970545.meta block BP-329677405-172.17.0.3-1733201968871:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-329677405-172.17.0.3-1733201968871:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40961,DS-ce57b09f-ac05-4fc1-af04-9b59bf15ac0f,DISK], DatanodeInfoWithStorage[127.0.0.1:42039,DS-84045bae-cc71-4024-9178-1a28caebb905,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40961,DS-ce57b09f-ac05-4fc1-af04-9b59bf15ac0f,DISK]) is bad. 
2024-12-03T04:59:42,781 WARN [ResponseProcessor for block BP-329677405-172.17.0.3-1733201968871:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-329677405-172.17.0.3-1733201968871:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-329677405-172.17.0.3-1733201968871:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:40961,DS-ce57b09f-ac05-4fc1-af04-9b59bf15ac0f,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:42,782 WARN [PacketResponder: BP-329677405-172.17.0.3-1733201968871:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40961] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:42,782 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-252833222_22 at /127.0.0.1:45788 [Receiving block BP-329677405-172.17.0.3-1733201968871:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42039:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45788 dst: /127.0.0.1:42039 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:42,783 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-252833222_22 at /127.0.0.1:39414 [Receiving block BP-329677405-172.17.0.3-1733201968871:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40961:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39414 dst: /127.0.0.1:40961 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T04:59:42,785 WARN [DataStreamer for file /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/WALs/32beccd76b35,42965,1733201969589/32beccd76b35%2C42965%2C1733201969589.1733201969796 block BP-329677405-172.17.0.3-1733201968871:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-329677405-172.17.0.3-1733201968871:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42039,DS-84045bae-cc71-4024-9178-1a28caebb905,DISK], DatanodeInfoWithStorage[127.0.0.1:40961,DS-ce57b09f-ac05-4fc1-af04-9b59bf15ac0f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40961,DS-ce57b09f-ac05-4fc1-af04-9b59bf15ac0f,DISK]) is bad. 2024-12-03T04:59:42,785 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-252833222_22 at /127.0.0.1:39420 [Receiving block BP-329677405-172.17.0.3-1733201968871:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40961:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39420 dst: /127.0.0.1:40961 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:42,786 WARN [PacketResponder: BP-329677405-172.17.0.3-1733201968871:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40961] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] 
at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:42,786 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-252833222_22 at /127.0.0.1:45804 [Receiving block BP-329677405-172.17.0.3-1733201968871:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:42039:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45804 dst: /127.0.0.1:42039 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:42,786 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_732553995_22 at /127.0.0.1:45760 [Receiving block BP-329677405-172.17.0.3-1733201968871:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:42039:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45760 dst: /127.0.0.1:42039 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:42,787 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_732553995_22 at /127.0.0.1:39378 [Receiving block BP-329677405-172.17.0.3-1733201968871:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40961:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39378 dst: /127.0.0.1:40961 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:42,951 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@784d9fd7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:59:42,951 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6dd025c5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T04:59:42,951 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T04:59:42,952 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@484325f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T04:59:42,952 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@631f73d8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/hadoop.log.dir/,STOPPED} 2024-12-03T04:59:42,953 WARN [BP-329677405-172.17.0.3-1733201968871 heartbeating to localhost/127.0.0.1:46177 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T04:59:42,953 WARN [BP-329677405-172.17.0.3-1733201968871 heartbeating to localhost/127.0.0.1:46177 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-329677405-172.17.0.3-1733201968871 (Datanode Uuid 437a725a-5891-4a4e-8fa8-92e4661703dc) service to localhost/127.0.0.1:46177 2024-12-03T04:59:42,954 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/cluster_d71912db-e3b1-7130-3cb8-411025cb1efd/data/data3/current/BP-329677405-172.17.0.3-1733201968871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:59:42,954 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/cluster_d71912db-e3b1-7130-3cb8-411025cb1efd/data/data4/current/BP-329677405-172.17.0.3-1733201968871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:59:42,954 ERROR [Command processor 
{}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-03T04:59:42,955 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T04:59:42,955 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T04:59:43,010 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T04:59:43,025 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T04:59:43,070 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T04:59:43,070 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T04:59:43,070 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T04:59:43,088 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f692dd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/hadoop.log.dir/,AVAILABLE} 2024-12-03T04:59:43,088 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@51fbd534{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T04:59:43,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:43,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T04:59:43,307 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3c09fcee{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/java.io.tmpdir/jetty-localhost-38335-hadoop-hdfs-3_4_1-tests_jar-_-any-791073812481470297/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:59:43,307 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7fbde2b7{HTTP/1.1, (http/1.1)}{localhost:38335} 2024-12-03T04:59:43,307 INFO [Time-limited test {}] server.Server(415): Started @167469ms 2024-12-03T04:59:43,309 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T04:59:43,510 WARN [ResponseProcessor for block BP-329677405-172.17.0.3-1733201968871:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-329677405-172.17.0.3-1733201968871:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:43,510 WARN [ResponseProcessor for block BP-329677405-172.17.0.3-1733201968871:blk_1073741833_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-329677405-172.17.0.3-1733201968871:blk_1073741833_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:43,510 WARN [ResponseProcessor for block BP-329677405-172.17.0.3-1733201968871:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-329677405-172.17.0.3-1733201968871:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:43,512 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-252833222_22 at /127.0.0.1:44436 [Receiving block BP-329677405-172.17.0.3-1733201968871:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:42039:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44436 dst: /127.0.0.1:42039 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] 
at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:43,513 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_732553995_22 at /127.0.0.1:44450 [Receiving block BP-329677405-172.17.0.3-1733201968871:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:42039:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44450 dst: /127.0.0.1:42039 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:43,513 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-252833222_22 at /127.0.0.1:44426 [Receiving block BP-329677405-172.17.0.3-1733201968871:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42039:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44426 dst: /127.0.0.1:42039 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T04:59:43,564 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@63be899e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:59:43,565 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7f7e1003{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T04:59:43,565 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T04:59:43,566 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15281d4e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T04:59:43,566 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b5cdfcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/hadoop.log.dir/,STOPPED} 2024-12-03T04:59:43,569 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-03T04:59:43,569 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T04:59:43,569 WARN [BP-329677405-172.17.0.3-1733201968871 heartbeating to localhost/127.0.0.1:46177 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T04:59:43,569 WARN [BP-329677405-172.17.0.3-1733201968871 heartbeating to localhost/127.0.0.1:46177 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-329677405-172.17.0.3-1733201968871 (Datanode Uuid 0f0f8f1a-6f08-4a74-9e72-e877de382ee4) service to localhost/127.0.0.1:46177 2024-12-03T04:59:43,570 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/cluster_d71912db-e3b1-7130-3cb8-411025cb1efd/data/data1/current/BP-329677405-172.17.0.3-1733201968871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:59:43,573 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/cluster_d71912db-e3b1-7130-3cb8-411025cb1efd/data/data2/current/BP-329677405-172.17.0.3-1733201968871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:59:43,574 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T04:59:43,657 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T04:59:43,684 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T04:59:43,753 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T04:59:43,754 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T04:59:43,754 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T04:59:43,762 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7889bd43{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/hadoop.log.dir/,AVAILABLE} 2024-12-03T04:59:43,762 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46dec6a1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T04:59:43,814 WARN [Thread-1331 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T04:59:43,828 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x44ff140e833ef69f with lease ID 0x4f9e12cc7b00f379: from storage DS-ce57b09f-ac05-4fc1-af04-9b59bf15ac0f node DatanodeRegistration(127.0.0.1:46357, datanodeUuid=437a725a-5891-4a4e-8fa8-92e4661703dc, infoPort=37691, infoSecurePort=0, ipcPort=42973, storageInfo=lv=-57;cid=testClusterID;nsid=1432857738;c=1733201968871), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-03T04:59:43,828 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x44ff140e833ef69f with lease ID 0x4f9e12cc7b00f379: from storage DS-d507e67d-8b43-42f5-8ab1-bb2d03c4e863 node DatanodeRegistration(127.0.0.1:46357, datanodeUuid=437a725a-5891-4a4e-8fa8-92e4661703dc, infoPort=37691, infoSecurePort=0, ipcPort=42973, storageInfo=lv=-57;cid=testClusterID;nsid=1432857738;c=1733201968871), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T04:59:44,056 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5c3b56c4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/java.io.tmpdir/jetty-localhost-42775-hadoop-hdfs-3_4_1-tests_jar-_-any-16161806050628630961/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:59:44,065 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7e1d853e{HTTP/1.1, (http/1.1)}{localhost:42775} 2024-12-03T04:59:44,065 INFO [Time-limited test {}] server.Server(415): Started @168227ms 2024-12-03T04:59:44,067 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-03T04:59:44,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:44,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:44,443 WARN [Thread-1362 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T04:59:44,458 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb3d1e0bd544d4a24 with lease ID 0x4f9e12cc7b00f37a: from storage DS-84045bae-cc71-4024-9178-1a28caebb905 node DatanodeRegistration(127.0.0.1:43011, datanodeUuid=0f0f8f1a-6f08-4a74-9e72-e877de382ee4, infoPort=43705, infoSecurePort=0, ipcPort=44319, storageInfo=lv=-57;cid=testClusterID;nsid=1432857738;c=1733201968871), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T04:59:44,458 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb3d1e0bd544d4a24 with lease ID 0x4f9e12cc7b00f37a: from storage DS-fb71848e-c666-4650-a644-d56c419475e4 node DatanodeRegistration(127.0.0.1:43011, datanodeUuid=0f0f8f1a-6f08-4a74-9e72-e877de382ee4, infoPort=43705, infoSecurePort=0, ipcPort=44319, storageInfo=lv=-57;cid=testClusterID;nsid=1432857738;c=1733201968871), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-03T04:59:45,260 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-12-03T04:59:45,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:45,263 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-03T04:59:45,265 ERROR [FSHLog-0-hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac-prefix:32beccd76b35,40541,1733201969664 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42039,DS-84045bae-cc71-4024-9178-1a28caebb905,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:45,265 WARN [FSHLog-0-hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac-prefix:32beccd76b35,40541,1733201969664 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42039,DS-84045bae-cc71-4024-9178-1a28caebb905,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:45,265 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 32beccd76b35%2C40541%2C1733201969664:(num 1733201970175) roll requested 2024-12-03T04:59:45,266 INFO [regionserver/32beccd76b35:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C40541%2C1733201969664.1733201985266 2024-12-03T04:59:45,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T04:59:45,325 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201970175 newFile=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201985266 2024-12-03T04:59:45,325 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:45,325 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:45,326 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:45,472 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:45,472 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:45,473 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201970175 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201985266 2024-12-03T04:59:45,474 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42039,DS-84045bae-cc71-4024-9178-1a28caebb905,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:45,475 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42039,DS-84045bae-cc71-4024-9178-1a28caebb905,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T04:59:45,475 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201970175 2024-12-03T04:59:45,477 WARN [IPC Server handler 3 on default port 46177 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201970175 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1013 2024-12-03T04:59:45,479 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201970175 after 3ms 2024-12-03T04:59:45,503 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37691:37691),(127.0.0.1/127.0.0.1:43705:43705)] 2024-12-03T04:59:45,503 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201970175 is not closed yet, will try archiving it next time 2024-12-03T04:59:46,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:46,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:46,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43011 is added to blk_1073741833_1017 (size=1632) 2024-12-03T04:59:47,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:47,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:47,510 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-03T04:59:47,825 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1013: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-03T04:59:48,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:48,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:49,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:49,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:49,480 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201970175 after 4004ms 2024-12-03T04:59:49,521 WARN [ResponseProcessor for block BP-329677405-172.17.0.3-1733201968871:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-329677405-172.17.0.3-1733201968871:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-329677405-172.17.0.3-1733201968871:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:43011,DS-84045bae-cc71-4024-9178-1a28caebb905,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:49,521 WARN [DataStreamer for file /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201985266 block BP-329677405-172.17.0.3-1733201968871:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-329677405-172.17.0.3-1733201968871:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46357,DS-ce57b09f-ac05-4fc1-af04-9b59bf15ac0f,DISK], DatanodeInfoWithStorage[127.0.0.1:43011,DS-84045bae-cc71-4024-9178-1a28caebb905,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43011,DS-84045bae-cc71-4024-9178-1a28caebb905,DISK]) is bad. 2024-12-03T04:59:49,524 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-252833222_22 at /127.0.0.1:39520 [Receiving block BP-329677405-172.17.0.3-1733201968871:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:46357:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39520 dst: /127.0.0.1:46357 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:49,524 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-252833222_22 at /127.0.0.1:37764 [Receiving block BP-329677405-172.17.0.3-1733201968871:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:43011:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37764 dst: /127.0.0.1:43011 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T04:59:49,567 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5c3b56c4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:59:49,568 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7e1d853e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T04:59:49,568 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T04:59:49,568 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46dec6a1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T04:59:49,569 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7889bd43{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/hadoop.log.dir/,STOPPED} 2024-12-03T04:59:49,572 WARN [BP-329677405-172.17.0.3-1733201968871 heartbeating to localhost/127.0.0.1:46177 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T04:59:49,572 WARN [BP-329677405-172.17.0.3-1733201968871 heartbeating to localhost/127.0.0.1:46177 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-329677405-172.17.0.3-1733201968871 (Datanode Uuid 0f0f8f1a-6f08-4a74-9e72-e877de382ee4) service to localhost/127.0.0.1:46177 2024-12-03T04:59:49,573 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-03T04:59:49,573 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T04:59:49,573 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/cluster_d71912db-e3b1-7130-3cb8-411025cb1efd/data/data1/current/BP-329677405-172.17.0.3-1733201968871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:59:49,574 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/cluster_d71912db-e3b1-7130-3cb8-411025cb1efd/data/data2/current/BP-329677405-172.17.0.3-1733201968871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:59:49,574 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T04:59:49,633 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T04:59:49,638 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T04:59:49,659 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T04:59:49,659 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T04:59:49,659 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T04:59:49,660 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30c66b4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/hadoop.log.dir/,AVAILABLE} 2024-12-03T04:59:49,660 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d23149{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T04:59:49,860 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@50ab774b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/java.io.tmpdir/jetty-localhost-32995-hadoop-hdfs-3_4_1-tests_jar-_-any-7262781889130772348/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:59:49,865 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@714162c1{HTTP/1.1, (http/1.1)}{localhost:32995} 2024-12-03T04:59:49,865 INFO [Time-limited test {}] server.Server(415): Started @174027ms 2024-12-03T04:59:49,868 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T04:59:49,962 WARN [ResponseProcessor for block BP-329677405-172.17.0.3-1733201968871:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-329677405-172.17.0.3-1733201968871:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:49,964 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-252833222_22 at /127.0.0.1:39550 [Receiving block BP-329677405-172.17.0.3-1733201968871:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:46357:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39550 dst: /127.0.0.1:46357 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T04:59:49,990 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3c09fcee{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:59:49,991 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7fbde2b7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T04:59:49,991 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T04:59:49,991 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@51fbd534{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T04:59:49,991 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f692dd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/hadoop.log.dir/,STOPPED} 2024-12-03T04:59:49,994 WARN [BP-329677405-172.17.0.3-1733201968871 heartbeating to localhost/127.0.0.1:46177 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T04:59:49,994 WARN [BP-329677405-172.17.0.3-1733201968871 heartbeating to localhost/127.0.0.1:46177 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-329677405-172.17.0.3-1733201968871 (Datanode Uuid 437a725a-5891-4a4e-8fa8-92e4661703dc) service to localhost/127.0.0.1:46177 2024-12-03T04:59:49,996 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-03T04:59:49,996 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T04:59:49,996 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/cluster_d71912db-e3b1-7130-3cb8-411025cb1efd/data/data4/current/BP-329677405-172.17.0.3-1733201968871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:59:49,996 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T04:59:49,996 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/cluster_d71912db-e3b1-7130-3cb8-411025cb1efd/data/data3/current/BP-329677405-172.17.0.3-1733201968871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T04:59:50,033 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T04:59:50,042 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T04:59:50,074 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T04:59:50,074 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T04:59:50,074 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T04:59:50,075 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61c3d880{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/hadoop.log.dir/,AVAILABLE} 2024-12-03T04:59:50,075 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2048ade0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T04:59:50,123 WARN [Thread-1405 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T04:59:50,126 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x32fa40ce482dcff3 with lease ID 0x4f9e12cc7b00f37b: from storage DS-84045bae-cc71-4024-9178-1a28caebb905 node DatanodeRegistration(127.0.0.1:37723, datanodeUuid=0f0f8f1a-6f08-4a74-9e72-e877de382ee4, infoPort=34849, infoSecurePort=0, ipcPort=42967, storageInfo=lv=-57;cid=testClusterID;nsid=1432857738;c=1733201968871), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T04:59:50,127 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x32fa40ce482dcff3 with lease ID 0x4f9e12cc7b00f37b: from storage DS-fb71848e-c666-4650-a644-d56c419475e4 node DatanodeRegistration(127.0.0.1:37723, datanodeUuid=0f0f8f1a-6f08-4a74-9e72-e877de382ee4, infoPort=34849, infoSecurePort=0, ipcPort=42967, storageInfo=lv=-57;cid=testClusterID;nsid=1432857738;c=1733201968871), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-03T04:59:50,230 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@67187ab0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/java.io.tmpdir/jetty-localhost-45007-hadoop-hdfs-3_4_1-tests_jar-_-any-5676743682753583769/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T04:59:50,232 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@558f8fa7{HTTP/1.1, (http/1.1)}{localhost:45007} 2024-12-03T04:59:50,232 INFO [Time-limited test {}] server.Server(415): Started @174394ms 2024-12-03T04:59:50,234 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-03T04:59:50,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:50,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:50,503 WARN [Thread-1436 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T04:59:50,510 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xafd0dc81ee1f8ecc with lease ID 0x4f9e12cc7b00f37c: from storage DS-ce57b09f-ac05-4fc1-af04-9b59bf15ac0f node DatanodeRegistration(127.0.0.1:42169, datanodeUuid=437a725a-5891-4a4e-8fa8-92e4661703dc, infoPort=42743, infoSecurePort=0, ipcPort=42935, storageInfo=lv=-57;cid=testClusterID;nsid=1432857738;c=1733201968871), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T04:59:50,511 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xafd0dc81ee1f8ecc with lease ID 0x4f9e12cc7b00f37c: from storage DS-d507e67d-8b43-42f5-8ab1-bb2d03c4e863 node DatanodeRegistration(127.0.0.1:42169, datanodeUuid=437a725a-5891-4a4e-8fa8-92e4661703dc, infoPort=42743, infoSecurePort=0, ipcPort=42935, storageInfo=lv=-57;cid=testClusterID;nsid=1432857738;c=1733201968871), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-03T04:59:51,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:51,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T04:59:51,336 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-12-03T04:59:51,338 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-12-03T04:59:51,340 ERROR [FSHLog-0-hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac-prefix:32beccd76b35,40541,1733201969664 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46357,DS-ce57b09f-ac05-4fc1-af04-9b59bf15ac0f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:51,340 WARN [FSHLog-0-hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac-prefix:32beccd76b35,40541,1733201969664 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46357,DS-ce57b09f-ac05-4fc1-af04-9b59bf15ac0f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
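The repeated "Failed invocation" warnings above come from RecoverLeaseFSUtils calling DistributedFileSystem#isFileClosed through reflection; the reflective wrapper is why each WARN shows "InvocationTargetException: null" first and the real cause ("Filesystem closed") only appears under "Caused by". A minimal sketch of such a reflective probe, assuming a Hadoop client on the classpath (illustration only, not the HBase source):

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class IsFileClosedProbe {
  // Returns true only when the filesystem positively reports the file as closed.
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (InvocationTargetException e) {
      // e.getCause() carries the real error, e.g. java.io.IOException: Filesystem closed
      return false;
    } catch (ReflectiveOperationException e) {
      // isFileClosed is not exposed by this FileSystem implementation
      return false;
    }
  }
}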
2024-12-03T04:59:51,340 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 32beccd76b35%2C40541%2C1733201969664:(num 1733201985266) roll requested 2024-12-03T04:59:51,341 INFO [regionserver/32beccd76b35:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C40541%2C1733201969664.1733201991340 2024-12-03T04:59:51,359 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201985266 newFile=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201991340 2024-12-03T04:59:51,359 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:51,359 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:51,359 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:51,360 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:51,360 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:51,360 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201985266 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201991340 2024-12-03T04:59:51,360 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46357,DS-ce57b09f-ac05-4fc1-af04-9b59bf15ac0f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:51,360 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46357,DS-ce57b09f-ac05-4fc1-af04-9b59bf15ac0f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
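The records above show the region server's logRoller closing the old writer and opening 32beccd76b35%2C40541%2C1733201969664.1733201991340 after the append failures. A hedged sketch of requesting a WAL roll from a client via the public Admin API (the roll in this log is triggered internally by the logRoller, not by a client; the server name is copied from the log records purely for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class RollWalWriterExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Server name taken from the log records above (host, port, start code).
      ServerName rs = ServerName.valueOf("32beccd76b35", 40541, 1733201969664L);
      // Asks the region server to close its current WAL writer and start a new file.
      admin.rollWALWriter(rs);
    }
  }
}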
2024-12-03T04:59:51,361 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201985266 2024-12-03T04:59:51,362 WARN [IPC Server handler 1 on default port 46177 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201985266 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-12-03T04:59:51,362 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201985266 after 1ms 2024-12-03T04:59:51,370 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34849:34849),(127.0.0.1/127.0.0.1:42743:42743)] 2024-12-03T04:59:51,371 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201985266 is not closed yet, will try archiving it next time 2024-12-03T04:59:51,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741837_1020 (size=2427) 2024-12-03T04:59:52,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:52,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:53,143 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-03T04:59:53,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:53,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:53,372 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C40541%2C1733201969664.1733201993372 2024-12-03T04:59:53,426 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201991340 newFile=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201993372 2024-12-03T04:59:53,426 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:53,429 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:53,433 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:53,437 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:53,437 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:53,438 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201991340 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201993372 2024-12-03T04:59:53,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741838_1019 (size=1264) 2024-12-03T04:59:53,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37723 is added to 
blk_1073741838_1019 (size=1264) 2024-12-03T04:59:53,470 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201985266 is not closed yet, will try archiving it next time 2024-12-03T04:59:53,481 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34849:34849),(127.0.0.1/127.0.0.1:42743:42743)] 2024-12-03T04:59:53,481 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201985266 is not closed yet, will try archiving it next time 2024-12-03T04:59:53,482 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201970175 2024-12-03T04:59:53,482 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201970175 2024-12-03T04:59:53,482 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201970175 after 0ms 2024-12-03T04:59:53,486 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201970175 2024-12-03T04:59:53,516 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733201971189/Put/vlen=218/seqid=0] 2024-12-03T04:59:53,517 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1733201980775/Put/vlen=1045/seqid=0] 2024-12-03T04:59:53,517 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201970175 2024-12-03T04:59:53,517 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201985266 2024-12-03T04:59:53,517 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201985266 2024-12-03T04:59:53,518 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201985266 after 0ms 2024-12-03T04:59:53,518 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201985266 
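The lease-recovery records above follow a simple pattern: attempt=0 either succeeds immediately or reports "Failed to recover lease ... Lease recovery is in progress", and a later attempt succeeds once the NameNode has finished recovery (e.g. "Recovered lease, attempt=1 ... after 4002ms" further down). A hedged sketch of such a retry loop against the public DistributedFileSystem API; the fixed 4-second pause is an assumption for illustration, not HBase's actual backoff policy:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class LeaseRecoveryLoop {
  static void recoverLease(DistributedFileSystem dfs, Path walFile) throws Exception {
    long start = System.currentTimeMillis();
    for (int attempt = 0; ; attempt++) {
      // Asks the NameNode to start (or confirm completion of) lease recovery for the file.
      boolean recovered = dfs.recoverLease(walFile);
      long elapsed = System.currentTimeMillis() - start;
      System.out.printf("attempt=%d recovered=%b after %dms%n", attempt, recovered, elapsed);
      if (recovered) {
        return;
      }
      Thread.sleep(4000L); // assumed retry interval; the log shows roughly 4s between attempts
    }
  }
}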
2024-12-03T04:59:53,521 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@47422dbb[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42169, datanodeUuid=437a725a-5891-4a4e-8fa8-92e4661703dc, infoPort=42743, infoSecurePort=0, ipcPort=42935, storageInfo=lv=-57;cid=testClusterID;nsid=1432857738;c=1733201968871):Failed to transfer BP-329677405-172.17.0.3-1733201968871:blk_1073741837_1020 to 127.0.0.1:37723 got java.net.SocketException: Original Exception : java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockSender.sendPacket(BlockSender.java:613) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockSender.doSendBlock(BlockSender.java:829) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockSender.sendBlock(BlockSender.java:766) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3102) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Connection reset by peer ... 
17 more 2024-12-03T04:59:53,522 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1733201985265/Put/vlen=1045/seqid=0] 2024-12-03T04:59:53,522 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1733201987514/Put/vlen=1045/seqid=0] 2024-12-03T04:59:53,523 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201985266 2024-12-03T04:59:53,523 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201991340 2024-12-03T04:59:53,523 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201991340 2024-12-03T04:59:53,523 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201991340 after 0ms 2024-12-03T04:59:53,523 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201991340 2024-12-03T04:59:53,528 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1733201991339/Put/vlen=1045/seqid=0] 2024-12-03T04:59:53,528 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201993372 2024-12-03T04:59:53,528 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201993372 2024-12-03T04:59:53,529 WARN [IPC Server handler 3 on default port 46177 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201993372 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-12-03T04:59:53,529 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201993372 after 1ms 2024-12-03T04:59:54,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:54,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:54,542 WARN [ResponseProcessor for block BP-329677405-172.17.0.3-1733201968871:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-329677405-172.17.0.3-1733201968871:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:54,542 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_732553995_22 at /127.0.0.1:52528 [Receiving block BP-329677405-172.17.0.3-1733201968871:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:42169:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52528 dst: /127.0.0.1:42169 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T04:59:54,542 WARN [DataStreamer for file /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201993372 block BP-329677405-172.17.0.3-1733201968871:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-329677405-172.17.0.3-1733201968871:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37723,DS-84045bae-cc71-4024-9178-1a28caebb905,DISK], DatanodeInfoWithStorage[127.0.0.1:42169,DS-ce57b09f-ac05-4fc1-af04-9b59bf15ac0f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37723,DS-84045bae-cc71-4024-9178-1a28caebb905,DISK]) is bad. 2024-12-03T04:59:54,542 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_732553995_22 at /127.0.0.1:45120 [Receiving block BP-329677405-172.17.0.3-1733201968871:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:37723:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45120 dst: /127.0.0.1:37723 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:37723 remote=/127.0.0.1:45120]. Total timeout mills is 60000, 58871 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T04:59:54,543 WARN [DataStreamer for file /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201993372 block BP-329677405-172.17.0.3-1733201968871:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-329677405-172.17.0.3-1733201968871:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T04:59:54,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37723 is added to blk_1073741839_1022 (size=85) 2024-12-03T04:59:55,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:55,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:55,363 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201985266 after 4002ms 2024-12-03T04:59:56,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:56,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:57,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:57,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:57,530 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201993372 after 4002ms 2024-12-03T04:59:57,530 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201993372 2024-12-03T04:59:57,540 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201993372 2024-12-03T04:59:57,540 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-12-03T04:59:57,540 ERROR [FSHLog-0-hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac-prefix:32beccd76b35,40541,1733201969664.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42039,DS-84045bae-cc71-4024-9178-1a28caebb905,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:57,541 WARN [FSHLog-0-hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac-prefix:32beccd76b35,40541,1733201969664.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42039,DS-84045bae-cc71-4024-9178-1a28caebb905,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:57,545 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 32beccd76b35%2C40541%2C1733201969664.meta:.meta(num 1733201970545) roll requested 2024-12-03T04:59:57,545 INFO [regionserver/32beccd76b35:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C40541%2C1733201969664.meta.1733201997545.meta 2024-12-03T04:59:57,590 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:57,590 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:57,597 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:57,601 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:57,605 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:57,605 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.meta.1733201970545.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.meta.1733201997545.meta 2024-12-03T04:59:57,606 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42039,DS-84045bae-cc71-4024-9178-1a28caebb905,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:57,606 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42039,DS-84045bae-cc71-4024-9178-1a28caebb905,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T04:59:57,606 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.meta.1733201970545.meta 2024-12-03T04:59:57,607 WARN [IPC Server handler 3 on default port 46177 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.meta.1733201970545.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1014 2024-12-03T04:59:57,608 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.meta.1733201970545.meta after 2ms 2024-12-03T04:59:57,623 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42743:42743),(127.0.0.1/127.0.0.1:34849:34849)] 2024-12-03T04:59:57,623 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.meta.1733201970545.meta is not closed yet, will try archiving it next time 2024-12-03T04:59:57,650 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/hbase/meta/1588230740/.tmp/info/6bfdc37c9b9d4d648433540a19579cc9 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a./info:regioninfo/1733201971195/Put/seqid=0 2024-12-03T04:59:57,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37723 is added to blk_1073741841_1025 (size=7125) 2024-12-03T04:59:57,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741841_1025 (size=7125) 2024-12-03T04:59:58,102 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/hbase/meta/1588230740/.tmp/info/6bfdc37c9b9d4d648433540a19579cc9 2024-12-03T04:59:58,157 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/hbase/meta/1588230740/.tmp/ns/b54f5fabc1724a89a6d05ed3fa312063 is 43, key is default/ns:d/1733201970592/Put/seqid=0 2024-12-03T04:59:58,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37723 is added to blk_1073741842_1026 (size=5153) 2024-12-03T04:59:58,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741842_1026 (size=5153) 2024-12-03T04:59:58,184 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/hbase/meta/1588230740/.tmp/ns/b54f5fabc1724a89a6d05ed3fa312063 2024-12-03T04:59:58,213 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/hbase/meta/1588230740/.tmp/table/f1c1062316b246d0b0fa5744dda375a8 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1733201971216/Put/seqid=0 2024-12-03T04:59:58,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37723 is added to blk_1073741843_1027 (size=5438) 2024-12-03T04:59:58,224 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/hbase/meta/1588230740/.tmp/table/f1c1062316b246d0b0fa5744dda375a8 2024-12-03T04:59:58,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741843_1027 (size=5438) 2024-12-03T04:59:58,232 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/hbase/meta/1588230740/.tmp/info/6bfdc37c9b9d4d648433540a19579cc9 as hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/hbase/meta/1588230740/info/6bfdc37c9b9d4d648433540a19579cc9 2024-12-03T04:59:58,244 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/hbase/meta/1588230740/info/6bfdc37c9b9d4d648433540a19579cc9, entries=10, sequenceid=11, filesize=7.0 K 2024-12-03T04:59:58,246 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/hbase/meta/1588230740/.tmp/ns/b54f5fabc1724a89a6d05ed3fa312063 as hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/hbase/meta/1588230740/ns/b54f5fabc1724a89a6d05ed3fa312063 2024-12-03T04:59:58,255 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/hbase/meta/1588230740/ns/b54f5fabc1724a89a6d05ed3fa312063, entries=2, sequenceid=11, filesize=5.0 K 2024-12-03T04:59:58,256 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/hbase/meta/1588230740/.tmp/table/f1c1062316b246d0b0fa5744dda375a8 as hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/hbase/meta/1588230740/table/f1c1062316b246d0b0fa5744dda375a8 2024-12-03T04:59:58,266 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/hbase/meta/1588230740/table/f1c1062316b246d0b0fa5744dda375a8, entries=2, sequenceid=11, filesize=5.3 K 2024-12-03T04:59:58,267 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 727ms, sequenceid=11, compaction requested=false 2024-12-03T04:59:58,268 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-03T04:59:58,268 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 926fee2387dd4e18f8e458c63fad081a 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-03T04:59:58,268 ERROR [FSHLog-0-hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac-prefix:32beccd76b35,40541,1733201969664 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-329677405-172.17.0.3-1733201968871:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:58,269 WARN [FSHLog-0-hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac-prefix:32beccd76b35,40541,1733201969664 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-329677405-172.17.0.3-1733201968871:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T04:59:58,269 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 32beccd76b35%2C40541%2C1733201969664:(num 1733201993372) roll requested 2024-12-03T04:59:58,269 INFO [regionserver/32beccd76b35:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C40541%2C1733201969664.1733201998269 2024-12-03T04:59:58,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:58,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:58,373 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201993372 newFile=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201998269 2024-12-03T04:59:58,373 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:58,374 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:58,374 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:58,374 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:58,374 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:58,375 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201993372 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201998269 2024-12-03T04:59:58,375 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-329677405-172.17.0.3-1733201968871:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T04:59:58,376 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-329677405-172.17.0.3-1733201968871:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor113.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T04:59:58,376 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201993372 2024-12-03T04:59:58,379 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201993372 after 3ms 2024-12-03T04:59:58,385 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.1733201993372 to hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/oldWALs/32beccd76b35%2C40541%2C1733201969664.1733201993372 2024-12-03T04:59:58,433 DEBUG [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42743:42743),(127.0.0.1/127.0.0.1:34849:34849)] 2024-12-03T04:59:58,472 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/default/TestLogRolling-testLogRollOnPipelineRestart/926fee2387dd4e18f8e458c63fad081a/.tmp/info/ec400723970043b6a384421f7d15dda2 is 1080, key is row1002/info:/1733201980775/Put/seqid=0 2024-12-03T04:59:58,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37723 is added to blk_1073741845_1029 (size=9270) 2024-12-03T04:59:58,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741845_1029 (size=9270) 2024-12-03T04:59:58,926 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/default/TestLogRolling-testLogRollOnPipelineRestart/926fee2387dd4e18f8e458c63fad081a/.tmp/info/ec400723970043b6a384421f7d15dda2 2024-12-03T04:59:58,958 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/default/TestLogRolling-testLogRollOnPipelineRestart/926fee2387dd4e18f8e458c63fad081a/.tmp/info/ec400723970043b6a384421f7d15dda2 as hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/default/TestLogRolling-testLogRollOnPipelineRestart/926fee2387dd4e18f8e458c63fad081a/info/ec400723970043b6a384421f7d15dda2 2024-12-03T04:59:58,968 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/default/TestLogRolling-testLogRollOnPipelineRestart/926fee2387dd4e18f8e458c63fad081a/info/ec400723970043b6a384421f7d15dda2, entries=4, sequenceid=8, filesize=9.1 K 2024-12-03T04:59:58,973 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 926fee2387dd4e18f8e458c63fad081a in 705ms, sequenceid=8, compaction requested=false 2024-12-03T04:59:58,974 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 
926fee2387dd4e18f8e458c63fad081a: 2024-12-03T04:59:58,982 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T04:59:58,983 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T04:59:58,983 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T04:59:58,983 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T04:59:58,984 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-03T04:59:58,984 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T04:59:58,984 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-03T04:59:58,984 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=528843661, stopped=false
2024-12-03T04:59:58,985 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=32beccd76b35,42965,1733201969589
2024-12-03T04:59:58,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-03T04:59:58,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40541-0x1015176364a0001, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-03T04:59:58,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T04:59:58,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40541-0x1015176364a0001, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-03T04:59:58,990 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-03T04:59:58,991 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40541-0x1015176364a0001, quorum=127.0.0.1:58503, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T04:59:58,991 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-03T04:59:58,992 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-03T04:59:58,992 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T04:59:58,992 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T04:59:58,993 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '32beccd76b35,40541,1733201969664' ***** 2024-12-03T04:59:58,994 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T04:59:58,994 INFO [RS:0;32beccd76b35:40541 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T04:59:58,994 INFO [RS:0;32beccd76b35:40541 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T04:59:58,994 INFO [RS:0;32beccd76b35:40541 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T04:59:58,994 INFO [RS:0;32beccd76b35:40541 {}] regionserver.HRegionServer(3091): Received CLOSE for 926fee2387dd4e18f8e458c63fad081a 2024-12-03T04:59:58,994 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T04:59:58,997 INFO [RS:0;32beccd76b35:40541 {}] regionserver.HRegionServer(959): stopping server 32beccd76b35,40541,1733201969664 2024-12-03T04:59:58,997 INFO [RS:0;32beccd76b35:40541 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T04:59:58,997 INFO [RS:0;32beccd76b35:40541 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;32beccd76b35:40541. 2024-12-03T04:59:58,998 DEBUG [RS:0;32beccd76b35:40541 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T04:59:58,998 DEBUG [RS:0;32beccd76b35:40541 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T04:59:58,998 INFO [RS:0;32beccd76b35:40541 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T04:59:58,998 INFO [RS:0;32beccd76b35:40541 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T04:59:58,998 INFO [RS:0;32beccd76b35:40541 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-03T04:59:58,998 INFO [RS:0;32beccd76b35:40541 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T04:59:58,998 INFO [RS:0;32beccd76b35:40541 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-03T04:59:58,998 DEBUG [RS:0;32beccd76b35:40541 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 926fee2387dd4e18f8e458c63fad081a=TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a.} 2024-12-03T04:59:58,998 DEBUG [RS:0;32beccd76b35:40541 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 926fee2387dd4e18f8e458c63fad081a 2024-12-03T04:59:58,999 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 926fee2387dd4e18f8e458c63fad081a, disabling compactions & flushes 2024-12-03T04:59:58,999 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T04:59:58,999 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a. 2024-12-03T04:59:58,999 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T04:59:58,999 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a. 2024-12-03T04:59:58,999 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T04:59:58,999 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a. after waiting 0 ms 2024-12-03T04:59:58,999 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T04:59:58,999 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a. 2024-12-03T04:59:58,999 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T04:59:59,021 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/default/TestLogRolling-testLogRollOnPipelineRestart/926fee2387dd4e18f8e458c63fad081a/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-12-03T04:59:59,023 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a. 
2024-12-03T04:59:59,023 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 926fee2387dd4e18f8e458c63fad081a: Waiting for close lock at 1733201998999Running coprocessor pre-close hooks at 1733201998999Disabling compacts and flushes for region at 1733201998999Disabling writes for close at 1733201998999Writing region close event to WAL at 1733201999005 (+6 ms)Running coprocessor post-close hooks at 1733201999023 (+18 ms)Closed at 1733201999023 2024-12-03T04:59:59,023 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733201970711.926fee2387dd4e18f8e458c63fad081a. 2024-12-03T04:59:59,024 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-03T04:59:59,025 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T04:59:59,025 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T04:59:59,025 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733201998999Running coprocessor pre-close hooks at 1733201998999Disabling compacts and flushes for region at 1733201998999Disabling writes for close at 1733201998999Writing region close event to WAL at 1733201999020 (+21 ms)Running coprocessor post-close hooks at 1733201999025 (+5 ms)Closed at 1733201999025 2024-12-03T04:59:59,025 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T04:59:59,079 INFO [regionserver/32beccd76b35:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-03T04:59:59,079 INFO [regionserver/32beccd76b35:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-03T04:59:59,199 INFO [RS:0;32beccd76b35:40541 {}] regionserver.HRegionServer(976): stopping server 32beccd76b35,40541,1733201969664; all regions closed. 
2024-12-03T04:59:59,203 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:59,204 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:59,204 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:59,213 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:59,213 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T04:59:59,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37723 is added to blk_1073741840_1023 (size=825) 2024-12-03T04:59:59,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741840_1023 (size=825) 2024-12-03T04:59:59,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:59,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T04:59:59,512 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-12-03T04:59:59,565 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T05:00:00,042 INFO [regionserver/32beccd76b35:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T05:00:00,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:00,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:01,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:01,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:00:01,609 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.meta.1733201970545.meta after 4003ms 2024-12-03T05:00:01,609 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/WALs/32beccd76b35,40541,1733201969664/32beccd76b35%2C40541%2C1733201969664.meta.1733201970545.meta to hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/oldWALs/32beccd76b35%2C40541%2C1733201969664.meta.1733201970545.meta 2024-12-03T05:00:01,613 DEBUG [RS:0;32beccd76b35:40541 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/oldWALs 2024-12-03T05:00:01,613 INFO [RS:0;32beccd76b35:40541 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 32beccd76b35%2C40541%2C1733201969664.meta:.meta(num 1733201997545) 2024-12-03T05:00:01,614 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:00:01,614 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:00:01,614 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:00:01,615 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:00:01,615 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:00:01,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37723 is added to blk_1073741844_1028 (size=1162) 2024-12-03T05:00:01,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741844_1028 (size=1162) 2024-12-03T05:00:01,624 DEBUG [RS:0;32beccd76b35:40541 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/oldWALs 2024-12-03T05:00:01,624 INFO [RS:0;32beccd76b35:40541 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 32beccd76b35%2C40541%2C1733201969664:(num 1733201998269) 2024-12-03T05:00:01,624 DEBUG [RS:0;32beccd76b35:40541 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T05:00:01,624 INFO [RS:0;32beccd76b35:40541 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T05:00:01,624 INFO [RS:0;32beccd76b35:40541 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T05:00:01,624 INFO [RS:0;32beccd76b35:40541 {}] hbase.ChoreService(370): Chore service for: regionserver/32beccd76b35:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-03T05:00:01,625 INFO [RS:0;32beccd76b35:40541 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T05:00:01,625 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T05:00:01,625 INFO [RS:0;32beccd76b35:40541 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40541
2024-12-03T05:00:01,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40541-0x1015176364a0001, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/32beccd76b35,40541,1733201969664
2024-12-03T05:00:01,628 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-03T05:00:01,628 INFO [RS:0;32beccd76b35:40541 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-03T05:00:01,630 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [32beccd76b35,40541,1733201969664]
2024-12-03T05:00:01,632 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/32beccd76b35,40541,1733201969664 already deleted, retry=false
2024-12-03T05:00:01,632 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 32beccd76b35,40541,1733201969664 expired; onlineServers=0
2024-12-03T05:00:01,632 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '32beccd76b35,42965,1733201969589' *****
2024-12-03T05:00:01,632 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-03T05:00:01,632 INFO [M:0;32beccd76b35:42965 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-03T05:00:01,632 INFO [M:0;32beccd76b35:42965 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-03T05:00:01,632 DEBUG [M:0;32beccd76b35:42965 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-03T05:00:01,632 DEBUG [M:0;32beccd76b35:42965 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-03T05:00:01,632 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-03T05:00:01,632 DEBUG [master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.large.0-1733201969926 {}] cleaner.HFileCleaner(306): Exit Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.large.0-1733201969926,5,FailOnTimeoutGroup] 2024-12-03T05:00:01,632 DEBUG [master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.small.0-1733201969926 {}] cleaner.HFileCleaner(306): Exit Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.small.0-1733201969926,5,FailOnTimeoutGroup] 2024-12-03T05:00:01,633 INFO [M:0;32beccd76b35:42965 {}] hbase.ChoreService(370): Chore service for: master/32beccd76b35:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T05:00:01,633 INFO [M:0;32beccd76b35:42965 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T05:00:01,633 DEBUG [M:0;32beccd76b35:42965 {}] master.HMaster(1795): Stopping service threads 2024-12-03T05:00:01,633 INFO [M:0;32beccd76b35:42965 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T05:00:01,633 INFO [M:0;32beccd76b35:42965 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T05:00:01,633 INFO [M:0;32beccd76b35:42965 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T05:00:01,634 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T05:00:01,635 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T05:00:01,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:00:01,636 DEBUG [M:0;32beccd76b35:42965 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-12-03T05:00:01,637 DEBUG [M:0;32beccd76b35:42965 {}] master.ActiveMasterManager(353): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-12-03T05:00:01,637 INFO [M:0;32beccd76b35:42965 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/.lastflushedseqids 2024-12-03T05:00:01,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37723 is added to blk_1073741846_1030 (size=120) 2024-12-03T05:00:01,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741846_1030 (size=120) 2024-12-03T05:00:01,644 INFO [M:0;32beccd76b35:42965 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-03T05:00:01,644 INFO [M:0;32beccd76b35:42965 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T05:00:01,645 DEBUG [M:0;32beccd76b35:42965 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T05:00:01,645 INFO [M:0;32beccd76b35:42965 {}] regionserver.HRegion(1755): Closing region 
master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T05:00:01,645 DEBUG [M:0;32beccd76b35:42965 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T05:00:01,645 DEBUG [M:0;32beccd76b35:42965 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T05:00:01,645 DEBUG [M:0;32beccd76b35:42965 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T05:00:01,645 INFO [M:0;32beccd76b35:42965 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-12-03T05:00:01,645 ERROR [FSHLog-0-hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData-prefix:32beccd76b35,42965,1733201969589 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42039,DS-84045bae-cc71-4024-9178-1a28caebb905,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T05:00:01,646 WARN [FSHLog-0-hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData-prefix:32beccd76b35,42965,1733201969589 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42039,DS-84045bae-cc71-4024-9178-1a28caebb905,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-03T05:00:01,646 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 32beccd76b35%2C42965%2C1733201969589:(num 1733201969796) roll requested 2024-12-03T05:00:01,646 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C42965%2C1733201969589.1733202001646 2024-12-03T05:00:01,651 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:00:01,651 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:00:01,651 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:00:01,651 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:00:01,651 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:00:01,652 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/WALs/32beccd76b35,42965,1733201969589/32beccd76b35%2C42965%2C1733201969589.1733201969796 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/WALs/32beccd76b35,42965,1733201969589/32beccd76b35%2C42965%2C1733201969589.1733202001646 2024-12-03T05:00:01,652 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42039,DS-84045bae-cc71-4024-9178-1a28caebb905,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T05:00:01,652 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42039,DS-84045bae-cc71-4024-9178-1a28caebb905,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-03T05:00:01,652 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/WALs/32beccd76b35,42965,1733201969589/32beccd76b35%2C42965%2C1733201969589.1733201969796 2024-12-03T05:00:01,653 WARN [IPC Server handler 0 on default port 46177 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/WALs/32beccd76b35,42965,1733201969589/32beccd76b35%2C42965%2C1733201969589.1733201969796 has not been closed. 
Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-12-03T05:00:01,653 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34849:34849),(127.0.0.1/127.0.0.1:42743:42743)] 2024-12-03T05:00:01,653 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/WALs/32beccd76b35,42965,1733201969589/32beccd76b35%2C42965%2C1733201969589.1733201969796 after 1ms 2024-12-03T05:00:01,653 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/WALs/32beccd76b35,42965,1733201969589/32beccd76b35%2C42965%2C1733201969589.1733201969796 is not closed yet, will try archiving it next time 2024-12-03T05:00:01,671 DEBUG [M:0;32beccd76b35:42965 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5a6f34d71d8241a8a59a853c0981c19a is 82, key is hbase:meta,,1/info:regioninfo/1733201970572/Put/seqid=0 2024-12-03T05:00:01,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37723 is added to blk_1073741848_1033 (size=5672) 2024-12-03T05:00:01,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741848_1033 (size=5672) 2024-12-03T05:00:01,680 INFO [M:0;32beccd76b35:42965 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5a6f34d71d8241a8a59a853c0981c19a 2024-12-03T05:00:01,703 DEBUG [M:0;32beccd76b35:42965 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ae30203fdd9a4e6e8c60b1b2d5dd0666 is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733201971223/Put/seqid=0 2024-12-03T05:00:01,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37723 is added to blk_1073741849_1034 (size=6119) 2024-12-03T05:00:01,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741849_1034 (size=6119) 2024-12-03T05:00:01,713 INFO [M:0;32beccd76b35:42965 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ae30203fdd9a4e6e8c60b1b2d5dd0666 2024-12-03T05:00:01,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40541-0x1015176364a0001, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T05:00:01,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40541-0x1015176364a0001, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 
2024-12-03T05:00:01,731 INFO [RS:0;32beccd76b35:40541 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T05:00:01,731 INFO [RS:0;32beccd76b35:40541 {}] regionserver.HRegionServer(1031): Exiting; stopping=32beccd76b35,40541,1733201969664; zookeeper connection closed. 2024-12-03T05:00:01,731 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7a254862 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7a254862 2024-12-03T05:00:01,732 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-03T05:00:01,739 DEBUG [M:0;32beccd76b35:42965 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0164da148cea43168de7ff7de1337089 is 69, key is 32beccd76b35,40541,1733201969664/rs:state/1733201970006/Put/seqid=0 2024-12-03T05:00:01,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741850_1035 (size=5156) 2024-12-03T05:00:01,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37723 is added to blk_1073741850_1035 (size=5156) 2024-12-03T05:00:01,749 INFO [M:0;32beccd76b35:42965 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0164da148cea43168de7ff7de1337089 2024-12-03T05:00:01,774 DEBUG [M:0;32beccd76b35:42965 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3c246f23b54f452cabefc86fa4043a6e is 52, key is load_balancer_on/state:d/1733201970704/Put/seqid=0 2024-12-03T05:00:01,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741851_1036 (size=5056) 2024-12-03T05:00:01,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37723 is added to blk_1073741851_1036 (size=5056) 2024-12-03T05:00:01,782 INFO [M:0;32beccd76b35:42965 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3c246f23b54f452cabefc86fa4043a6e 2024-12-03T05:00:01,788 DEBUG [M:0;32beccd76b35:42965 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5a6f34d71d8241a8a59a853c0981c19a as hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5a6f34d71d8241a8a59a853c0981c19a 2024-12-03T05:00:01,794 INFO [M:0;32beccd76b35:42965 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5a6f34d71d8241a8a59a853c0981c19a, entries=8, sequenceid=56, filesize=5.5 K 2024-12-03T05:00:01,795 DEBUG [M:0;32beccd76b35:42965 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ae30203fdd9a4e6e8c60b1b2d5dd0666 as hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ae30203fdd9a4e6e8c60b1b2d5dd0666 2024-12-03T05:00:01,801 INFO [M:0;32beccd76b35:42965 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ae30203fdd9a4e6e8c60b1b2d5dd0666, entries=6, sequenceid=56, filesize=6.0 K 2024-12-03T05:00:01,802 DEBUG [M:0;32beccd76b35:42965 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0164da148cea43168de7ff7de1337089 as hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0164da148cea43168de7ff7de1337089 2024-12-03T05:00:01,807 INFO [M:0;32beccd76b35:42965 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0164da148cea43168de7ff7de1337089, entries=1, sequenceid=56, filesize=5.0 K 2024-12-03T05:00:01,808 DEBUG [M:0;32beccd76b35:42965 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3c246f23b54f452cabefc86fa4043a6e as hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3c246f23b54f452cabefc86fa4043a6e 2024-12-03T05:00:01,817 INFO [M:0;32beccd76b35:42965 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3c246f23b54f452cabefc86fa4043a6e, entries=1, sequenceid=56, filesize=4.9 K 2024-12-03T05:00:01,819 INFO [M:0;32beccd76b35:42965 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 173ms, sequenceid=56, compaction requested=false 2024-12-03T05:00:01,824 INFO [M:0;32beccd76b35:42965 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T05:00:01,824 DEBUG [M:0;32beccd76b35:42965 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733202001645Disabling compacts and flushes for region at 1733202001645Disabling writes for close at 1733202001645Obtaining lock to block concurrent updates at 1733202001645Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733202001645Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1733202001646 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733202001653 (+7 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733202001654 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733202001671 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733202001671Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733202001686 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733202001703 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733202001703Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733202001721 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733202001739 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733202001739Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733202001755 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733202001774 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733202001774Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24bb3712: reopening flushed file at 1733202001787 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@10ccb559: reopening flushed file at 1733202001794 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2742d22: reopening flushed file at 1733202001801 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2539a327: reopening flushed file at 1733202001807 (+6 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 173ms, sequenceid=56, compaction requested=false at 1733202001819 (+12 ms)Writing region close event to WAL at 1733202001824 (+5 ms)Closed at 1733202001824 2024-12-03T05:00:01,825 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:00:01,825 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:00:01,825 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:00:01,825 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:00:01,825 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:00:01,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42169 is added to blk_1073741847_1031 (size=757) 2024-12-03T05:00:01,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37723 is added to blk_1073741847_1031 (size=757) 2024-12-03T05:00:02,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:02,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:02,833 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T05:00:02,833 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-03T05:00:02,840 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-03T05:00:02,841 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-03T05:00:03,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:03,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:04,023 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,024 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,039 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,040 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,040 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,040 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,040 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,041 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,044 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,044 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,045 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,052 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:04,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:00:04,555 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T05:00:04,557 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,557 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,557 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,558 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,590 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,590 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,591 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,593 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,593 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,594 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:04,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:05,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:05,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:05,512 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-03T05:00:05,654 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/WALs/32beccd76b35,42965,1733201969589/32beccd76b35%2C42965%2C1733201969589.1733201969796 after 4002ms 2024-12-03T05:00:05,654 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/WALs/32beccd76b35,42965,1733201969589/32beccd76b35%2C42965%2C1733201969589.1733201969796 to hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/oldWALs/32beccd76b35%2C42965%2C1733201969589.1733201969796 2024-12-03T05:00:05,658 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/MasterData/oldWALs/32beccd76b35%2C42965%2C1733201969589.1733201969796 to hdfs://localhost:46177/user/jenkins/test-data/616ccb99-4b56-b429-da99-6521c3d759ac/oldWALs/32beccd76b35%2C42965%2C1733201969589.1733201969796$masterlocalwal$ 2024-12-03T05:00:05,658 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T05:00:05,658 INFO [M:0;32beccd76b35:42965 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-03T05:00:05,658 INFO [M:0;32beccd76b35:42965 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42965 2024-12-03T05:00:05,659 INFO [M:0;32beccd76b35:42965 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T05:00:05,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T05:00:05,761 INFO [M:0;32beccd76b35:42965 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T05:00:05,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42965-0x1015176364a0000, quorum=127.0.0.1:58503, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T05:00:05,764 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@67187ab0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T05:00:05,764 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@558f8fa7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T05:00:05,764 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T05:00:05,765 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2048ade0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T05:00:05,765 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61c3d880{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/hadoop.log.dir/,STOPPED} 2024-12-03T05:00:05,766 WARN [BP-329677405-172.17.0.3-1733201968871 heartbeating to localhost/127.0.0.1:46177 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T05:00:05,766 WARN [BP-329677405-172.17.0.3-1733201968871 heartbeating to localhost/127.0.0.1:46177 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-329677405-172.17.0.3-1733201968871 (Datanode Uuid 437a725a-5891-4a4e-8fa8-92e4661703dc) service to localhost/127.0.0.1:46177 2024-12-03T05:00:05,766 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T05:00:05,766 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T05:00:05,767 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/cluster_d71912db-e3b1-7130-3cb8-411025cb1efd/data/data3/current/BP-329677405-172.17.0.3-1733201968871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T05:00:05,767 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/cluster_d71912db-e3b1-7130-3cb8-411025cb1efd/data/data4/current/BP-329677405-172.17.0.3-1733201968871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T05:00:05,767 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T05:00:05,769 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@50ab774b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T05:00:05,769 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@714162c1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T05:00:05,770 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T05:00:05,770 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d23149{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T05:00:05,770 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30c66b4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/hadoop.log.dir/,STOPPED} 2024-12-03T05:00:05,771 WARN [BP-329677405-172.17.0.3-1733201968871 heartbeating to localhost/127.0.0.1:46177 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T05:00:05,771 WARN [BP-329677405-172.17.0.3-1733201968871 heartbeating to localhost/127.0.0.1:46177 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-329677405-172.17.0.3-1733201968871 (Datanode Uuid 0f0f8f1a-6f08-4a74-9e72-e877de382ee4) service to localhost/127.0.0.1:46177 2024-12-03T05:00:05,771 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T05:00:05,772 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T05:00:05,772 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/cluster_d71912db-e3b1-7130-3cb8-411025cb1efd/data/data1/current/BP-329677405-172.17.0.3-1733201968871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T05:00:05,772 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/cluster_d71912db-e3b1-7130-3cb8-411025cb1efd/data/data2/current/BP-329677405-172.17.0.3-1733201968871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T05:00:05,773 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T05:00:05,779 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@57f89277{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T05:00:05,780 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2342942b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T05:00:05,780 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T05:00:05,780 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7945c5a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T05:00:05,780 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c4c555c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/hadoop.log.dir/,STOPPED} 2024-12-03T05:00:05,787 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-03T05:00:05,813 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-03T05:00:05,825 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=180 (was 154) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46177 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:46177 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46177 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46177 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46177 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1647223992) connection to localhost/127.0.0.1:46177 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1647223992) connection to localhost/127.0.0.1:46177 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1647223992) connection to localhost/127.0.0.1:46177 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=912 (was 494) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=3529 (was 4819) 2024-12-03T05:00:05,835 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=180, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=912, ProcessCount=11, AvailableMemoryMB=3529 2024-12-03T05:00:05,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T05:00:05,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/hadoop.log.dir so I do NOT create it in target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf 2024-12-03T05:00:05,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/570e58a4-a3dd-8ca2-33b3-4db5990c2b12/hadoop.tmp.dir so I do NOT create it in target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf 2024-12-03T05:00:05,836 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/cluster_90211db9-1009-0dcc-f315-1af3237589ae, deleteOnExit=true 2024-12-03T05:00:05,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-03T05:00:05,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/test.cache.data in system properties and HBase conf 2024-12-03T05:00:05,837 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T05:00:05,837 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/hadoop.log.dir in system properties and HBase conf 2024-12-03T05:00:05,837 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T05:00:05,837 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T05:00:05,837 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-03T05:00:05,837 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-03T05:00:05,837 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T05:00:05,838 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T05:00:05,838 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T05:00:05,838 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T05:00:05,838 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T05:00:05,838 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T05:00:05,838 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T05:00:05,838 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T05:00:05,838 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T05:00:05,838 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/nfs.dump.dir in system properties and HBase conf 2024-12-03T05:00:05,839 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/java.io.tmpdir in system properties and HBase conf 2024-12-03T05:00:05,839 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T05:00:05,839 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T05:00:05,839 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T05:00:05,858 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T05:00:05,947 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T05:00:05,953 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T05:00:05,979 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T05:00:05,980 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T05:00:05,980 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T05:00:05,986 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T05:00:05,986 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a686da4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/hadoop.log.dir/,AVAILABLE} 2024-12-03T05:00:05,986 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26880a4e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T05:00:06,143 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@41398064{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/java.io.tmpdir/jetty-localhost-34407-hadoop-hdfs-3_4_1-tests_jar-_-any-17362845565763953530/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T05:00:06,143 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1411d2ea{HTTP/1.1, (http/1.1)}{localhost:34407} 2024-12-03T05:00:06,143 INFO [Time-limited test {}] server.Server(415): Started @190306ms 2024-12-03T05:00:06,159 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T05:00:06,243 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T05:00:06,246 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T05:00:06,247 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T05:00:06,247 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T05:00:06,247 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T05:00:06,248 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e21aa83{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/hadoop.log.dir/,AVAILABLE} 2024-12-03T05:00:06,249 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ad8fe14{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T05:00:06,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:00:06,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:06,385 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55e190a0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/java.io.tmpdir/jetty-localhost-38851-hadoop-hdfs-3_4_1-tests_jar-_-any-10660292678563601038/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T05:00:06,386 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@cad26ca{HTTP/1.1, (http/1.1)}{localhost:38851} 2024-12-03T05:00:06,386 INFO [Time-limited test {}] server.Server(415): Started @190548ms 2024-12-03T05:00:06,387 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T05:00:06,454 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T05:00:06,460 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T05:00:06,461 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T05:00:06,461 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T05:00:06,461 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T05:00:06,462 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5b8dfaa1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/hadoop.log.dir/,AVAILABLE} 2024-12-03T05:00:06,462 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27dc1529{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T05:00:06,518 WARN [Thread-1629 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/cluster_90211db9-1009-0dcc-f315-1af3237589ae/data/data2/current/BP-1614527077-172.17.0.3-1733202005879/current, will proceed with Du for space computation calculation, 2024-12-03T05:00:06,518 WARN [Thread-1628 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/cluster_90211db9-1009-0dcc-f315-1af3237589ae/data/data1/current/BP-1614527077-172.17.0.3-1733202005879/current, will proceed with Du for space computation calculation, 2024-12-03T05:00:06,537 WARN [Thread-1607 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T05:00:06,540 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xabfb8eb4626138fe with lease ID 0x60012dff8524fb1a: Processing first storage report for DS-cc32d56f-a25f-435a-b0fe-40442938b5a8 from datanode DatanodeRegistration(127.0.0.1:37069, datanodeUuid=868b60b5-14e2-47a9-9978-49b6406460c8, infoPort=41323, infoSecurePort=0, ipcPort=40397, storageInfo=lv=-57;cid=testClusterID;nsid=648225091;c=1733202005879) 2024-12-03T05:00:06,540 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xabfb8eb4626138fe with lease ID 0x60012dff8524fb1a: from storage DS-cc32d56f-a25f-435a-b0fe-40442938b5a8 node DatanodeRegistration(127.0.0.1:37069, datanodeUuid=868b60b5-14e2-47a9-9978-49b6406460c8, infoPort=41323, infoSecurePort=0, ipcPort=40397, storageInfo=lv=-57;cid=testClusterID;nsid=648225091;c=1733202005879), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T05:00:06,540 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xabfb8eb4626138fe with lease ID 0x60012dff8524fb1a: Processing first storage report for DS-562162e7-fdf1-4bbe-bbb7-2a520372916e from datanode DatanodeRegistration(127.0.0.1:37069, datanodeUuid=868b60b5-14e2-47a9-9978-49b6406460c8, infoPort=41323, infoSecurePort=0, ipcPort=40397, storageInfo=lv=-57;cid=testClusterID;nsid=648225091;c=1733202005879) 2024-12-03T05:00:06,540 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xabfb8eb4626138fe with lease ID 0x60012dff8524fb1a: from storage DS-562162e7-fdf1-4bbe-bbb7-2a520372916e node DatanodeRegistration(127.0.0.1:37069, datanodeUuid=868b60b5-14e2-47a9-9978-49b6406460c8, infoPort=41323, infoSecurePort=0, ipcPort=40397, storageInfo=lv=-57;cid=testClusterID;nsid=648225091;c=1733202005879), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T05:00:06,597 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@61122961{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/java.io.tmpdir/jetty-localhost-33905-hadoop-hdfs-3_4_1-tests_jar-_-any-12633831466984181096/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T05:00:06,598 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@15f0a5cf{HTTP/1.1, (http/1.1)}{localhost:33905} 2024-12-03T05:00:06,598 INFO [Time-limited test {}] server.Server(415): Started @190760ms 2024-12-03T05:00:06,599 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-03T05:00:06,728 WARN [Thread-1654 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/cluster_90211db9-1009-0dcc-f315-1af3237589ae/data/data3/current/BP-1614527077-172.17.0.3-1733202005879/current, will proceed with Du for space computation calculation, 2024-12-03T05:00:06,733 WARN [Thread-1655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/cluster_90211db9-1009-0dcc-f315-1af3237589ae/data/data4/current/BP-1614527077-172.17.0.3-1733202005879/current, will proceed with Du for space computation calculation, 2024-12-03T05:00:06,759 WARN [Thread-1643 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T05:00:06,762 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbbefe5574b8d26d5 with lease ID 0x60012dff8524fb1b: Processing first storage report for DS-071bb0db-de40-4907-923b-f4bca1149aa5 from datanode DatanodeRegistration(127.0.0.1:35113, datanodeUuid=4562c1a3-be6e-41b7-b75c-6031be9c107d, infoPort=33301, infoSecurePort=0, ipcPort=35073, storageInfo=lv=-57;cid=testClusterID;nsid=648225091;c=1733202005879) 2024-12-03T05:00:06,762 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbbefe5574b8d26d5 with lease ID 0x60012dff8524fb1b: from storage DS-071bb0db-de40-4907-923b-f4bca1149aa5 node DatanodeRegistration(127.0.0.1:35113, datanodeUuid=4562c1a3-be6e-41b7-b75c-6031be9c107d, infoPort=33301, infoSecurePort=0, ipcPort=35073, storageInfo=lv=-57;cid=testClusterID;nsid=648225091;c=1733202005879), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T05:00:06,762 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbbefe5574b8d26d5 with lease ID 0x60012dff8524fb1b: Processing first storage report for DS-875ab3fc-0448-4424-8311-db647caa8f7d from datanode DatanodeRegistration(127.0.0.1:35113, datanodeUuid=4562c1a3-be6e-41b7-b75c-6031be9c107d, infoPort=33301, infoSecurePort=0, ipcPort=35073, storageInfo=lv=-57;cid=testClusterID;nsid=648225091;c=1733202005879) 2024-12-03T05:00:06,762 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbbefe5574b8d26d5 with lease ID 0x60012dff8524fb1b: from storage DS-875ab3fc-0448-4424-8311-db647caa8f7d node DatanodeRegistration(127.0.0.1:35113, datanodeUuid=4562c1a3-be6e-41b7-b75c-6031be9c107d, infoPort=33301, infoSecurePort=0, ipcPort=35073, storageInfo=lv=-57;cid=testClusterID;nsid=648225091;c=1733202005879), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T05:00:06,834 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf 2024-12-03T05:00:06,915 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/cluster_90211db9-1009-0dcc-f315-1af3237589ae/zookeeper_0, clientPort=53408, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/cluster_90211db9-1009-0dcc-f315-1af3237589ae/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/cluster_90211db9-1009-0dcc-f315-1af3237589ae/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T05:00:06,917 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53408 2024-12-03T05:00:06,917 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T05:00:06,919 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T05:00:06,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741825_1001 (size=7) 2024-12-03T05:00:06,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741825_1001 (size=7) 2024-12-03T05:00:07,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:07,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:00:07,396 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9 with version=8 2024-12-03T05:00:07,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/hbase-staging 2024-12-03T05:00:07,399 INFO [Time-limited test {}] client.ConnectionUtils(128): master/32beccd76b35:0 server-side Connection retries=45 2024-12-03T05:00:07,399 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T05:00:07,399 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T05:00:07,399 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T05:00:07,399 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T05:00:07,399 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T05:00:07,399 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-03T05:00:07,400 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T05:00:07,411 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42197 2024-12-03T05:00:07,413 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42197 connecting to ZooKeeper ensemble=127.0.0.1:53408 2024-12-03T05:00:07,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:421970x0, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T05:00:07,473 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42197-0x1015176c80d0000 connected 2024-12-03T05:00:07,644 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T05:00:07,646 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T05:00:07,655 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T05:00:07,655 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9, 
hbase.cluster.distributed=false 2024-12-03T05:00:07,660 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T05:00:07,675 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42197 2024-12-03T05:00:07,688 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42197 2024-12-03T05:00:07,718 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42197 2024-12-03T05:00:07,741 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42197 2024-12-03T05:00:07,747 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42197 2024-12-03T05:00:07,772 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/32beccd76b35:0 server-side Connection retries=45 2024-12-03T05:00:07,772 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T05:00:07,772 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T05:00:07,772 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T05:00:07,772 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T05:00:07,772 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T05:00:07,773 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T05:00:07,773 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T05:00:07,774 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:37941 2024-12-03T05:00:07,776 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37941 connecting to ZooKeeper ensemble=127.0.0.1:53408 2024-12-03T05:00:07,777 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T05:00:07,781 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T05:00:07,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:379410x0, 
quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T05:00:07,787 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:379410x0, quorum=127.0.0.1:53408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T05:00:07,788 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T05:00:07,789 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37941-0x1015176c80d0001 connected 2024-12-03T05:00:07,814 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T05:00:07,822 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37941-0x1015176c80d0001, quorum=127.0.0.1:53408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T05:00:07,826 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37941-0x1015176c80d0001, quorum=127.0.0.1:53408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T05:00:07,849 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37941 2024-12-03T05:00:07,850 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37941 2024-12-03T05:00:07,850 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37941 2024-12-03T05:00:07,865 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37941 2024-12-03T05:00:07,877 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37941 2024-12-03T05:00:07,932 DEBUG [M:0;32beccd76b35:42197 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;32beccd76b35:42197 2024-12-03T05:00:07,942 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/32beccd76b35,42197,1733202007399 2024-12-03T05:00:07,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37941-0x1015176c80d0001, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T05:00:07,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T05:00:07,958 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/32beccd76b35,42197,1733202007399 2024-12-03T05:00:07,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37941-0x1015176c80d0001, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T05:00:07,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:00:07,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37941-0x1015176c80d0001, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:00:07,961 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T05:00:07,962 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/32beccd76b35,42197,1733202007399 from backup master directory 2024-12-03T05:00:07,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/32beccd76b35,42197,1733202007399 2024-12-03T05:00:07,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37941-0x1015176c80d0001, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T05:00:07,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T05:00:07,964 WARN [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-03T05:00:07,965 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=32beccd76b35,42197,1733202007399 2024-12-03T05:00:07,972 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/hbase.id] with ID: db87459d-d6c0-4087-9b5f-c9404e8a216c 2024-12-03T05:00:07,972 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/.tmp/hbase.id 2024-12-03T05:00:08,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741826_1002 (size=42) 2024-12-03T05:00:08,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741826_1002 (size=42) 2024-12-03T05:00:08,077 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/.tmp/hbase.id]:[hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/hbase.id] 2024-12-03T05:00:08,104 INFO [master/32beccd76b35:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T05:00:08,104 INFO [master/32beccd76b35:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-03T05:00:08,106 INFO [master/32beccd76b35:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-12-03T05:00:08,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37941-0x1015176c80d0001, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:00:08,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:00:08,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741827_1003 (size=196) 2024-12-03T05:00:08,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741827_1003 (size=196) 2024-12-03T05:00:08,192 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T05:00:08,193 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T05:00:08,210 INFO [master/32beccd76b35:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T05:00:08,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:08,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741828_1004 (size=1189) 2024-12-03T05:00:08,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741828_1004 (size=1189) 2024-12-03T05:00:08,298 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store 2024-12-03T05:00:08,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:00:08,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741829_1005 (size=34) 2024-12-03T05:00:08,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741829_1005 (size=34) 2024-12-03T05:00:08,335 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T05:00:08,335 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T05:00:08,335 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T05:00:08,335 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T05:00:08,335 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T05:00:08,335 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T05:00:08,335 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T05:00:08,335 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733202008335Disabling compacts and flushes for region at 1733202008335Disabling writes for close at 1733202008335Writing region close event to WAL at 1733202008335Closed at 1733202008335 2024-12-03T05:00:08,338 WARN [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store/.initializing 2024-12-03T05:00:08,338 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/WALs/32beccd76b35,42197,1733202007399 2024-12-03T05:00:08,346 INFO [master/32beccd76b35:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32beccd76b35%2C42197%2C1733202007399, suffix=, logDir=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/WALs/32beccd76b35,42197,1733202007399, archiveDir=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/oldWALs, maxLogs=10 2024-12-03T05:00:08,347 INFO [master/32beccd76b35:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C42197%2C1733202007399.1733202008347 2024-12-03T05:00:08,416 INFO [master/32beccd76b35:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/WALs/32beccd76b35,42197,1733202007399/32beccd76b35%2C42197%2C1733202007399.1733202008347 2024-12-03T05:00:08,439 
DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33301:33301),(127.0.0.1/127.0.0.1:41323:41323)] 2024-12-03T05:00:08,454 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T05:00:08,454 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T05:00:08,454 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:00:08,455 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:00:08,477 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:00:08,480 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T05:00:08,480 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:00:08,480 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:00:08,481 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:00:08,482 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered 
compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T05:00:08,482 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:00:08,483 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T05:00:08,483 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:00:08,485 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T05:00:08,485 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:00:08,485 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T05:00:08,486 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:00:08,487 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T05:00:08,487 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:00:08,488 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T05:00:08,488 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:00:08,489 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:00:08,489 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:00:08,492 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:00:08,492 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:00:08,493 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
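The FlushLargeStoresPolicy entry above falls back to region.getMemStoreFlushHeapSize divided by the number of column families when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set. Below is a minimal worked sketch of that arithmetic; the class and method names are invented for illustration, and only the numbers (the 134217728-byte flush size and the four master:store families info/proc/rs/state seen in this log) come from the surrounding entries.

```java
// Illustrative only: mirrors the fallback arithmetic described in the log entry above,
// not the actual org.apache.hadoop.hbase.regionserver.FlushLargeStoresPolicy code.
public class FlushLowerBoundSketch {
    static long perFamilyFlushLowerBound(Long configuredLowerBound,
                                         long memStoreFlushSize,
                                         int numColumnFamilies) {
        // If hbase.hregion.percolumnfamilyflush.size.lower.bound is set, use it directly.
        if (configuredLowerBound != null) {
            return configuredLowerBound;
        }
        // Otherwise fall back to the memstore flush size divided by the family count.
        return memStoreFlushSize / numColumnFamilies;
    }

    public static void main(String[] args) {
        // master:store has 4 families (info, proc, rs, state) and a 128 MB flush size,
        // so the fallback is 33554432 bytes (32 MB), matching the
        // FlushLargeStoresPolicy{flushSizeLowerBound=33554432} reported a few entries later.
        System.out.println(perFamilyFlushLowerBound(null, 134217728L, 4)); // 33554432
    }
}
```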
2024-12-03T05:00:08,494 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:00:08,506 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T05:00:08,507 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=808908, jitterRate=0.028580859303474426}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T05:00:08,508 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733202008455Initializing all the Stores at 1733202008456 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733202008456Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733202008477 (+21 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733202008477Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733202008477Cleaning up temporary data from old regions at 1733202008492 (+15 ms)Region opened successfully at 1733202008507 (+15 ms) 2024-12-03T05:00:08,525 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T05:00:08,530 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33089a20, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=32beccd76b35/172.17.0.3:0 2024-12-03T05:00:08,531 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-12-03T05:00:08,531 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T05:00:08,531 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T05:00:08,532 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T05:00:08,532 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-03T05:00:08,533 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-03T05:00:08,533 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T05:00:08,537 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T05:00:08,538 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T05:00:08,540 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T05:00:08,540 INFO [master/32beccd76b35:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T05:00:08,542 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T05:00:08,544 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T05:00:08,544 INFO [master/32beccd76b35:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T05:00:08,556 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T05:00:08,558 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T05:00:08,559 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T05:00:08,561 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T05:00:08,563 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T05:00:08,564 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T05:00:08,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37941-0x1015176c80d0001, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T05:00:08,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37941-0x1015176c80d0001, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:00:08,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T05:00:08,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:00:08,570 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=32beccd76b35,42197,1733202007399, sessionid=0x1015176c80d0000, setting cluster-up flag (Was=false) 2024-12-03T05:00:08,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37941-0x1015176c80d0001, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:00:08,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:00:08,616 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T05:00:08,617 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=32beccd76b35,42197,1733202007399 2024-12-03T05:00:08,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:00:08,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37941-0x1015176c80d0001, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:00:08,632 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T05:00:08,641 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=32beccd76b35,42197,1733202007399 2024-12-03T05:00:08,658 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at 
hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T05:00:08,674 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T05:00:08,675 INFO [master/32beccd76b35:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T05:00:08,675 INFO [master/32beccd76b35:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-03T05:00:08,675 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 32beccd76b35,42197,1733202007399 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T05:00:08,679 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T05:00:08,680 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T05:00:08,680 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T05:00:08,680 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T05:00:08,680 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/32beccd76b35:0, corePoolSize=10, maxPoolSize=10 2024-12-03T05:00:08,680 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:00:08,680 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/32beccd76b35:0, corePoolSize=2, maxPoolSize=2 2024-12-03T05:00:08,680 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:00:08,683 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T05:00:08,683 INFO [PEWorker-1 {}] 
procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T05:00:08,685 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:00:08,685 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T05:00:08,707 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733202038707 2024-12-03T05:00:08,707 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T05:00:08,707 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T05:00:08,707 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T05:00:08,707 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T05:00:08,707 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T05:00:08,708 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T05:00:08,708 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
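The ChoreService entries above register periodic background chores such as LogsCleaner with period=600000 ms. As a rough sketch of how a fixed-period chore can be driven, here is a plain ScheduledExecutorService version; it is not the HBase ChoreService implementation, and the cleaner body is a placeholder.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Illustrative sketch of a periodic "chore" like the LogsCleaner entry above
// (period=600000 ms). Not the actual org.apache.hadoop.hbase.ChoreService code.
public class ChoreSketch {
    public static void main(String[] args) {
        ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
        Runnable logsCleaner = () -> {
            // Placeholder body: a real cleaner would scan oldWALs and delete expired files.
            System.out.println("LogsCleaner run at " + System.currentTimeMillis());
        };
        // Fixed-delay scheduling with the period reported in the log (600000 ms).
        pool.scheduleWithFixedDelay(logsCleaner, 0, 600000, TimeUnit.MILLISECONDS);
    }
}
```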
2024-12-03T05:00:08,708 INFO [RS:0;32beccd76b35:37941 {}] regionserver.HRegionServer(746): ClusterId : db87459d-d6c0-4087-9b5f-c9404e8a216c 2024-12-03T05:00:08,708 DEBUG [RS:0;32beccd76b35:37941 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T05:00:08,711 DEBUG [RS:0;32beccd76b35:37941 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T05:00:08,711 DEBUG [RS:0;32beccd76b35:37941 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T05:00:08,713 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T05:00:08,713 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T05:00:08,714 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T05:00:08,721 DEBUG [RS:0;32beccd76b35:37941 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T05:00:08,722 DEBUG [RS:0;32beccd76b35:37941 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5320c5ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=32beccd76b35/172.17.0.3:0 2024-12-03T05:00:08,737 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T05:00:08,737 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T05:00:08,745 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.large.0-1733202008737,5,FailOnTimeoutGroup] 2024-12-03T05:00:08,762 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.small.0-1733202008745,5,FailOnTimeoutGroup] 2024-12-03T05:00:08,762 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T05:00:08,762 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T05:00:08,762 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T05:00:08,762 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
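The hfile_cleaner entries above initialize a chain of cleaner delegates (TTL-based, HFileLink, snapshot). The sketch below illustrates only the general chain idea, namely that a file may be deleted only if every delegate agrees; the interface and helper names are made up and this is not the HBase CleanerChore API.

```java
import java.util.List;

// Generic illustration of a cleaner-delegate chain, inspired by the hfile_cleaner
// entries above. Names are invented; not the HBase cleaner implementation.
public class CleanerChainSketch {
    interface FileCleanerDelegate {
        boolean isDeletable(String path, long ageMillis);
    }

    // TTL-style delegate: only files older than the TTL may be deleted.
    static FileCleanerDelegate ttlCleaner(long ttlMillis) {
        return (path, age) -> age > ttlMillis;
    }

    // Link-style delegate: never delete files that are still referenced
    // (here detected by a made-up ".links-" marker in the path).
    static FileCleanerDelegate linkCleaner() {
        return (path, age) -> !path.contains(".links-");
    }

    static boolean deletable(List<FileCleanerDelegate> chain, String path, long ageMillis) {
        // A file is deletable only if every delegate in the chain says so.
        return chain.stream().allMatch(c -> c.isDeletable(path, ageMillis));
    }

    public static void main(String[] args) {
        List<FileCleanerDelegate> chain = List.of(ttlCleaner(300_000), linkCleaner());
        System.out.println(deletable(chain, "/archive/data/t1/hfile1", 600_000));        // true
        System.out.println(deletable(chain, "/archive/data/t1/.links-hfile2", 600_000)); // false
    }
}
```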
2024-12-03T05:00:08,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741831_1007 (size=1321) 2024-12-03T05:00:08,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741831_1007 (size=1321) 2024-12-03T05:00:08,769 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T05:00:08,769 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9 2024-12-03T05:00:08,779 DEBUG [RS:0;32beccd76b35:37941 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;32beccd76b35:37941 2024-12-03T05:00:08,780 INFO [RS:0;32beccd76b35:37941 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T05:00:08,780 INFO [RS:0;32beccd76b35:37941 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T05:00:08,780 DEBUG [RS:0;32beccd76b35:37941 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-03T05:00:08,781 INFO [RS:0;32beccd76b35:37941 {}] regionserver.HRegionServer(2659): reportForDuty to master=32beccd76b35,42197,1733202007399 with port=37941, startcode=1733202007772 2024-12-03T05:00:08,781 DEBUG [RS:0;32beccd76b35:37941 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T05:00:08,790 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53093, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T05:00:08,791 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42197 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 32beccd76b35,37941,1733202007772 2024-12-03T05:00:08,791 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42197 {}] master.ServerManager(517): Registering regionserver=32beccd76b35,37941,1733202007772 2024-12-03T05:00:08,793 DEBUG [RS:0;32beccd76b35:37941 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9 2024-12-03T05:00:08,793 DEBUG [RS:0;32beccd76b35:37941 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36629 2024-12-03T05:00:08,793 DEBUG [RS:0;32beccd76b35:37941 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T05:00:08,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741832_1008 (size=32) 2024-12-03T05:00:08,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741832_1008 (size=32) 2024-12-03T05:00:08,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T05:00:08,797 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T05:00:08,798 DEBUG [RS:0;32beccd76b35:37941 {}] zookeeper.ZKUtil(111): regionserver:37941-0x1015176c80d0001, quorum=127.0.0.1:53408, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/32beccd76b35,37941,1733202007772 2024-12-03T05:00:08,798 WARN [RS:0;32beccd76b35:37941 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
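The entries above show the region server reporting for duty and then registering itself under /hbase/rs in ZooKeeper. Below is a simplified, self-contained illustration of the ephemeral-znode registration pattern using the plain ZooKeeper client; it assumes the /hbase/rs parent node already exists and reuses the quorum address and server name from the log purely as example values. It is not HBase's tracker code.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

// Simplified sketch of registering an ephemeral znode like /hbase/rs/<host,port,startcode>.
public class EphemeralRegistrationSketch {
    public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:53408", 30000, event -> {
            if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
        });
        connected.await();
        // An ephemeral node disappears automatically when the session ends, which is
        // how a master notices that a region server has gone away.
        zk.create("/hbase/rs/32beccd76b35,37941,1733202007772", new byte[0],
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    }
}
```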
2024-12-03T05:00:08,798 INFO [RS:0;32beccd76b35:37941 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T05:00:08,798 DEBUG [RS:0;32beccd76b35:37941 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/WALs/32beccd76b35,37941,1733202007772 2024-12-03T05:00:08,799 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T05:00:08,802 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T05:00:08,802 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:00:08,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:00:08,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T05:00:08,804 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [32beccd76b35,37941,1733202007772] 2024-12-03T05:00:08,805 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T05:00:08,805 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:00:08,809 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:00:08,809 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T05:00:08,811 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T05:00:08,812 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:00:08,812 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:00:08,813 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T05:00:08,814 INFO [RS:0;32beccd76b35:37941 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T05:00:08,816 INFO [RS:0;32beccd76b35:37941 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T05:00:08,816 INFO [RS:0;32beccd76b35:37941 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T05:00:08,816 INFO [RS:0;32beccd76b35:37941 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
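The PressureAwareCompactionThroughputController line above reports a lower bound of 50 MB/s and a higher bound of 100 MB/s, retuned every 60000 ms. The sketch below illustrates only the general idea of interpolating a limit between those two bounds from a pressure value in [0, 1]; the controller's actual tuning formula is more involved and is not reproduced here.

```java
// Rough, assumption-laden sketch of a pressure-aware throughput limit between the
// bounds reported in the log above. Not the HBase controller's real formula.
public class ThroughputTuningSketch {
    static final double LOWER_BYTES_PER_SEC = 50.0 * 1024 * 1024;   // 50 MB/s lower bound
    static final double HIGHER_BYTES_PER_SEC = 100.0 * 1024 * 1024; // 100 MB/s higher bound

    // pressure is expected in [0, 1]; at or beyond 1 the limit is effectively removed.
    static double throughputLimit(double pressure) {
        if (pressure >= 1.0) {
            return Double.MAX_VALUE; // unlimited, e.g. off-peak or severe backlog
        }
        return LOWER_BYTES_PER_SEC + pressure * (HIGHER_BYTES_PER_SEC - LOWER_BYTES_PER_SEC);
    }

    public static void main(String[] args) {
        System.out.printf("pressure 0.0 -> %.0f bytes/s%n", throughputLimit(0.0));
        System.out.printf("pressure 0.5 -> %.0f bytes/s%n", throughputLimit(0.5));
    }
}
```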
2024-12-03T05:00:08,817 INFO [RS:0;32beccd76b35:37941 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T05:00:08,818 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T05:00:08,818 INFO [RS:0;32beccd76b35:37941 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T05:00:08,818 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:00:08,818 INFO [RS:0;32beccd76b35:37941 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T05:00:08,818 DEBUG [RS:0;32beccd76b35:37941 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:00:08,818 DEBUG [RS:0;32beccd76b35:37941 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:00:08,818 DEBUG [RS:0;32beccd76b35:37941 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:00:08,818 DEBUG [RS:0;32beccd76b35:37941 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:00:08,819 DEBUG [RS:0;32beccd76b35:37941 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:00:08,819 DEBUG [RS:0;32beccd76b35:37941 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/32beccd76b35:0, corePoolSize=2, maxPoolSize=2 2024-12-03T05:00:08,819 DEBUG [RS:0;32beccd76b35:37941 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:00:08,819 DEBUG [RS:0;32beccd76b35:37941 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:00:08,819 DEBUG [RS:0;32beccd76b35:37941 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:00:08,819 DEBUG [RS:0;32beccd76b35:37941 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:00:08,819 DEBUG [RS:0;32beccd76b35:37941 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SWITCH_RPC_THROTTLE-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:00:08,819 DEBUG [RS:0;32beccd76b35:37941 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:00:08,819 DEBUG [RS:0;32beccd76b35:37941 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/32beccd76b35:0, corePoolSize=3, maxPoolSize=3 2024-12-03T05:00:08,819 DEBUG [RS:0;32beccd76b35:37941 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0, corePoolSize=3, maxPoolSize=3 2024-12-03T05:00:08,820 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:00:08,820 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T05:00:08,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/hbase/meta/1588230740 2024-12-03T05:00:08,823 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/hbase/meta/1588230740 2024-12-03T05:00:08,824 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T05:00:08,824 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T05:00:08,825 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-03T05:00:08,826 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T05:00:08,843 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T05:00:08,844 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=837022, jitterRate=0.06432928144931793}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T05:00:08,845 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733202008797Initializing all the Stores at 1733202008798 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733202008798Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733202008798Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733202008798Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733202008798Cleaning up temporary data from old regions at 1733202008824 (+26 ms)Region opened successfully at 1733202008845 (+21 ms) 2024-12-03T05:00:08,845 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T05:00:08,845 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T05:00:08,845 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T05:00:08,845 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T05:00:08,845 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T05:00:08,865 INFO [RS:0;32beccd76b35:37941 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T05:00:08,865 INFO [RS:0;32beccd76b35:37941 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
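The split-policy line above logs desiredMaxFileSize=837022 with jitterRate=0.06432928..., and the earlier master:store line logs desiredMaxFileSize=808908 with jitterRate=0.02858.... Both pairs are consistent with desired = base * (1 + jitterRate) for a base of 786432 bytes; that base value and the exact relation are inferences from the two logged pairs, not something the log states. A small worked-arithmetic check:

```java
// Worked arithmetic only: checks that the two desiredMaxFileSize values logged above
// are consistent with base * (1 + jitterRate) for an inferred base of 786432 bytes.
public class SplitJitterSketch {
    public static void main(String[] args) {
        long base = 786432L; // inferred base max file size for this test run (assumption)
        double jitterRateMeta = 0.06432928144931793;    // from the hbase:meta line above
        double jitterRateMaster = 0.028580859303474426; // from the earlier master:store line
        System.out.println((long) (base + base * jitterRateMeta));   // 837022, as logged
        System.out.println((long) (base + base * jitterRateMaster)); // 808908, as logged
    }
}
```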
2024-12-03T05:00:08,865 INFO [RS:0;32beccd76b35:37941 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T05:00:08,865 INFO [RS:0;32beccd76b35:37941 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T05:00:08,865 INFO [RS:0;32beccd76b35:37941 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T05:00:08,865 INFO [RS:0;32beccd76b35:37941 {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,37941,1733202007772-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T05:00:08,898 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T05:00:08,898 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733202008845Disabling compacts and flushes for region at 1733202008845Disabling writes for close at 1733202008845Writing region close event to WAL at 1733202008898 (+53 ms)Closed at 1733202008898 2024-12-03T05:00:08,900 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T05:00:08,902 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T05:00:08,902 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T05:00:08,915 INFO [RS:0;32beccd76b35:37941 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T05:00:08,915 INFO [RS:0;32beccd76b35:37941 {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,37941,1733202007772-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T05:00:08,915 INFO [RS:0;32beccd76b35:37941 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T05:00:08,915 INFO [RS:0;32beccd76b35:37941 {}] regionserver.Replication(171): 32beccd76b35,37941,1733202007772 started 2024-12-03T05:00:08,917 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T05:00:08,919 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T05:00:08,973 INFO [RS:0;32beccd76b35:37941 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
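The region close journal above follows a fixed sequence: a time-limited wait for the close lock, disabling updates, writing the close event to the WAL, and marking the region closed. The sketch below shows that sequence with a plain ReentrantReadWriteLock; it is a generic illustration with invented names and an arbitrary 60-second timeout, not HRegion's implementation.

```java
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Generic illustration of the "time limited wait for close lock" sequence in the
// region close journal above. Not org.apache.hadoop.hbase.regionserver.HRegion code.
public class RegionCloseSketch {
    private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
    private volatile boolean updatesDisabled;
    private volatile boolean closed;

    boolean close(long timeoutMillis) throws InterruptedException {
        // Writers would hold the read lock; close takes the write lock, bounded by a timeout.
        if (!closeLock.writeLock().tryLock(timeoutMillis, TimeUnit.MILLISECONDS)) {
            return false; // could not acquire the close lock in time
        }
        try {
            updatesDisabled = true; // "Updates disabled for region ..."
            // ... flush memstores and write the region close event to the WAL here ...
            closed = true;          // "Closed ..."
            return updatesDisabled && closed;
        } finally {
            closeLock.writeLock().unlock();
        }
    }

    public static void main(String[] args) throws InterruptedException {
        System.out.println(new RegionCloseSketch().close(60_000));
    }
}
```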
2024-12-03T05:00:08,974 INFO [RS:0;32beccd76b35:37941 {}] regionserver.HRegionServer(1482): Serving as 32beccd76b35,37941,1733202007772, RpcServer on 32beccd76b35/172.17.0.3:37941, sessionid=0x1015176c80d0001 2024-12-03T05:00:08,974 DEBUG [RS:0;32beccd76b35:37941 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T05:00:08,974 DEBUG [RS:0;32beccd76b35:37941 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 32beccd76b35,37941,1733202007772 2024-12-03T05:00:08,974 DEBUG [RS:0;32beccd76b35:37941 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '32beccd76b35,37941,1733202007772' 2024-12-03T05:00:08,974 DEBUG [RS:0;32beccd76b35:37941 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T05:00:08,975 DEBUG [RS:0;32beccd76b35:37941 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T05:00:08,975 DEBUG [RS:0;32beccd76b35:37941 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T05:00:08,981 DEBUG [RS:0;32beccd76b35:37941 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T05:00:08,981 DEBUG [RS:0;32beccd76b35:37941 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 32beccd76b35,37941,1733202007772 2024-12-03T05:00:08,981 DEBUG [RS:0;32beccd76b35:37941 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '32beccd76b35,37941,1733202007772' 2024-12-03T05:00:08,981 DEBUG [RS:0;32beccd76b35:37941 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T05:00:08,982 DEBUG [RS:0;32beccd76b35:37941 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T05:00:08,982 DEBUG [RS:0;32beccd76b35:37941 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T05:00:08,982 INFO [RS:0;32beccd76b35:37941 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T05:00:08,982 INFO [RS:0;32beccd76b35:37941 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T05:00:09,069 WARN [32beccd76b35:42197 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
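The warning above defers placement because no region servers have registered yet; a later assignQueue round (systemServersCount=1, visible further down) places the region once the server has reported in. The sketch below mimics that defer-and-retry behaviour with invented class and method names and a trivial placement rule; it is not the AssignmentManager code.

```java
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

// Illustrative sketch of deferring assignment until at least one server is live,
// as reflected by the "No servers available" warning above. Names are invented.
public class AssignQueueSketch {
    private final List<String> liveServers = new CopyOnWriteArrayList<>();

    void registerServer(String serverName) {
        liveServers.add(serverName);
    }

    // Returns the chosen server, or null when the round must be deferred.
    String processAssignQueue(String regionEncodedName) {
        if (liveServers.isEmpty()) {
            System.out.println("No servers available; cannot place " + regionEncodedName);
            return null; // retried on a later round
        }
        // Trivial placement: first live server (a real balancer would pick more carefully).
        return liveServers.get(0);
    }

    public static void main(String[] args) {
        AssignQueueSketch am = new AssignQueueSketch();
        System.out.println(am.processAssignQueue("1588230740")); // null: deferred
        am.registerServer("32beccd76b35,37941,1733202007772");
        System.out.println(am.processAssignQueue("1588230740")); // now placed
    }
}
```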
2024-12-03T05:00:09,101 INFO [RS:0;32beccd76b35:37941 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32beccd76b35%2C37941%2C1733202007772, suffix=, logDir=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/WALs/32beccd76b35,37941,1733202007772, archiveDir=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/oldWALs, maxLogs=32
2024-12-03T05:00:09,102 INFO [RS:0;32beccd76b35:37941 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C37941%2C1733202007772.1733202009101
2024-12-03T05:00:09,195 INFO [RS:0;32beccd76b35:37941 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/WALs/32beccd76b35,37941,1733202007772/32beccd76b35%2C37941%2C1733202007772.1733202009101
2024-12-03T05:00:09,240 DEBUG [RS:0;32beccd76b35:37941 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41323:41323),(127.0.0.1/127.0.0.1:33301:33301)]
2024-12-03T05:00:09,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-12-03T05:00:09,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:00:09,320 DEBUG [32beccd76b35:42197 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-03T05:00:09,320 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=32beccd76b35,37941,1733202007772 2024-12-03T05:00:09,322 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 32beccd76b35,37941,1733202007772, state=OPENING 2024-12-03T05:00:09,325 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T05:00:09,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37941-0x1015176c80d0001, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:00:09,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:00:09,328 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T05:00:09,328 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=32beccd76b35,37941,1733202007772}] 2024-12-03T05:00:09,329 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T05:00:09,330 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T05:00:09,502 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T05:00:09,514 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40141, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T05:00:09,522 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T05:00:09,522 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T05:00:09,526 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32beccd76b35%2C37941%2C1733202007772.meta, suffix=.meta, logDir=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/WALs/32beccd76b35,37941,1733202007772, archiveDir=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/oldWALs, maxLogs=32 2024-12-03T05:00:09,527 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C37941%2C1733202007772.meta.1733202009527.meta 2024-12-03T05:00:09,580 INFO 
[RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/WALs/32beccd76b35,37941,1733202007772/32beccd76b35%2C37941%2C1733202007772.meta.1733202009527.meta 2024-12-03T05:00:09,631 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41323:41323),(127.0.0.1/127.0.0.1:33301:33301)] 2024-12-03T05:00:09,681 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T05:00:09,682 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T05:00:09,682 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T05:00:09,682 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-03T05:00:09,682 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T05:00:09,682 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T05:00:09,683 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T05:00:09,683 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T05:00:09,686 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T05:00:09,688 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T05:00:09,689 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:00:09,689 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:00:09,690 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T05:00:09,691 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T05:00:09,691 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:00:09,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:00:09,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T05:00:09,697 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T05:00:09,697 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:00:09,698 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:00:09,698 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T05:00:09,699 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T05:00:09,699 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:00:09,700 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:00:09,700 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T05:00:09,701 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/hbase/meta/1588230740 2024-12-03T05:00:09,702 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/hbase/meta/1588230740 2024-12-03T05:00:09,704 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T05:00:09,704 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T05:00:09,705 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
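Worked out from the FlushLargeStoresPolicy line above: with no per-table hbase.hregion.percolumnfamilyflush.size.lower.bound set, the lower bound falls back to memStoreFlushHeapSize / numberOfFamilies. hbase:meta instantiates four families here (info, ns, rep_barrier, table), so the logged 16.0 M implies a flush heap size of roughly 4 x 16 MB = 64 MB shared evenly across them (an inference from the logged formula, not a value printed directly).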
2024-12-03T05:00:09,706 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T05:00:09,708 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=777774, jitterRate=-0.011009901762008667}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T05:00:09,708 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T05:00:09,708 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733202009683Writing region info on filesystem at 1733202009683Initializing all the Stores at 1733202009685 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733202009685Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733202009686 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733202009686Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733202009686Cleaning up temporary data from old regions at 1733202009704 (+18 ms)Running coprocessor post-open hooks at 1733202009708 (+4 ms)Region opened successfully at 1733202009708 2024-12-03T05:00:09,710 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733202009501 2024-12-03T05:00:09,714 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T05:00:09,714 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T05:00:09,715 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=32beccd76b35,37941,1733202007772 2024-12-03T05:00:09,716 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 32beccd76b35,37941,1733202007772, state=OPEN 2024-12-03T05:00:09,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T05:00:09,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37941-0x1015176c80d0001, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T05:00:09,722 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T05:00:09,722 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T05:00:09,722 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=32beccd76b35,37941,1733202007772 2024-12-03T05:00:09,729 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T05:00:09,729 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=32beccd76b35,37941,1733202007772 in 395 msec 2024-12-03T05:00:09,733 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T05:00:09,733 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 828 msec 2024-12-03T05:00:09,734 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T05:00:09,734 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T05:00:09,736 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T05:00:09,736 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=32beccd76b35,37941,1733202007772, seqNum=-1] 2024-12-03T05:00:09,737 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T05:00:09,739 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46687, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T05:00:09,748 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0760 sec 2024-12-03T05:00:09,749 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733202009748, completionTime=-1 2024-12-03T05:00:09,749 INFO 
[master/32beccd76b35:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-03T05:00:09,749 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-03T05:00:09,751 INFO [master/32beccd76b35:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-03T05:00:09,751 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733202069751 2024-12-03T05:00:09,751 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733202129751 2024-12-03T05:00:09,751 INFO [master/32beccd76b35:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-03T05:00:09,752 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,42197,1733202007399-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T05:00:09,752 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,42197,1733202007399-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T05:00:09,752 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,42197,1733202007399-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T05:00:09,752 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-32beccd76b35:42197, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T05:00:09,752 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T05:00:09,754 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-03T05:00:09,754 DEBUG [master/32beccd76b35:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T05:00:09,765 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.800sec 2024-12-03T05:00:09,765 INFO [master/32beccd76b35:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T05:00:09,765 INFO [master/32beccd76b35:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T05:00:09,765 INFO [master/32beccd76b35:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T05:00:09,765 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
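Each "Chore ScheduledChore name=... is enabled" line above corresponds to a periodic task registered with the master's ChoreService. A hedged sketch of how such a chore is typically defined and scheduled (names are illustrative; this is not code from this test run):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

// Registers a chore that runs every 10 seconds, like the RollingUpgradeChore entry above.
static ScheduledChore scheduleExampleChore(ChoreService choreService, Stoppable stopper) {
  ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 10, 0, TimeUnit.SECONDS) {
    @Override
    protected void chore() {
      // periodic work goes here
    }
  };
  choreService.scheduleChore(chore);
  return chore;
}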
2024-12-03T05:00:09,766 INFO [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T05:00:09,766 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,42197,1733202007399-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T05:00:09,766 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,42197,1733202007399-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T05:00:09,787 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T05:00:09,787 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T05:00:09,787 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,42197,1733202007399-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T05:00:09,822 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48c024a2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T05:00:09,822 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 32beccd76b35,42197,-1 for getting cluster id 2024-12-03T05:00:09,822 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T05:00:09,835 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'db87459d-d6c0-4087-9b5f-c9404e8a216c' 2024-12-03T05:00:09,836 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T05:00:09,836 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "db87459d-d6c0-4087-9b5f-c9404e8a216c" 2024-12-03T05:00:09,836 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c7734d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T05:00:09,836 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [32beccd76b35,42197,-1] 2024-12-03T05:00:09,837 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T05:00:09,837 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T05:00:09,839 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58636, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T05:00:09,840 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26045f23, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T05:00:09,841 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T05:00:09,843 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=32beccd76b35,37941,1733202007772, seqNum=-1] 2024-12-03T05:00:09,843 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T05:00:09,844 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58214, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T05:00:09,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=32beccd76b35,42197,1733202007399 2024-12-03T05:00:09,847 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T05:00:09,851 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-03T05:00:09,851 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-03T05:00:09,853 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 32beccd76b35,42197,1733202007399 2024-12-03T05:00:09,853 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3672bf73 2024-12-03T05:00:09,853 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T05:00:09,855 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58652, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T05:00:09,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-03T05:00:09,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
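The two TableDescriptorChecker warnings above are expected in a log-rolling test: the configured values ("hbase.hregion.max.filesize" = 786432, "hbase.hregion.memstore.flush.size" = 8192) are presumably kept tiny on purpose so flushes and WAL rolls happen quickly. A sketch of how a test configuration might set them (assumed setup; the actual test source is not part of this log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Deliberately small sizes to provoke frequent flushes and WAL rolls in a mini-cluster test.
static Configuration smallSizesConf() {
  Configuration conf = HBaseConfiguration.create();
  conf.setLong("hbase.hregion.max.filesize", 786432L);      // ~768 KB max store file size, triggers the MAX_FILESIZE warning
  conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // 8 KB memstore flush threshold, triggers the MEMSTORE_FLUSHSIZE warning
  return conf;
}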
2024-12-03T05:00:09,856 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T05:00:09,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-03T05:00:09,859 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T05:00:09,859 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:00:09,861 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T05:00:09,862 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-12-03T05:00:09,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T05:00:09,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741835_1011 (size=405) 2024-12-03T05:00:09,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741835_1011 (size=405) 2024-12-03T05:00:09,946 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c499cc11a2db027abe112d2371bcab77, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9 2024-12-03T05:00:09,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741836_1012 (size=88) 2024-12-03T05:00:09,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37069 is added to blk_1073741836_1012 (size=88) 2024-12-03T05:00:09,972 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T05:00:09,972 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing c499cc11a2db027abe112d2371bcab77, disabling compactions & flushes 2024-12-03T05:00:09,972 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77. 2024-12-03T05:00:09,972 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77. 2024-12-03T05:00:09,972 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77. after waiting 0 ms 2024-12-03T05:00:09,972 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77. 2024-12-03T05:00:09,972 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77. 2024-12-03T05:00:09,972 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for c499cc11a2db027abe112d2371bcab77: Waiting for close lock at 1733202009972Disabling compacts and flushes for region at 1733202009972Disabling writes for close at 1733202009972Writing region close event to WAL at 1733202009972Closed at 1733202009972 2024-12-03T05:00:09,974 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T05:00:09,975 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733202009975"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733202009975"}]},"ts":"1733202009975"} 2024-12-03T05:00:09,978 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
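A client-side equivalent of the create request logged by HMaster above ('TestLogRolling-testCompactionRecordDoesntBlockRolling' with a single 'info' family, VERSIONS = 1, BLOCKSIZE = 64 KB) would look roughly like the following Admin call; this is an illustrative sketch, not the test's actual code:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

static void createTestTable(org.apache.hadoop.conf.Configuration conf) throws Exception {
  TableName tn = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
  TableDescriptor td = TableDescriptorBuilder.newBuilder(tn)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
          .setMaxVersions(1)      // VERSIONS => '1' in the logged descriptor
          .setBlocksize(65536)    // BLOCKSIZE => '65536 B (64KB)'
          .build())
      .build();
  try (Connection conn = ConnectionFactory.createConnection(conf);
       Admin admin = conn.getAdmin()) {
    admin.createTable(td);        // drives the CreateTableProcedure (pid=4) recorded above
  }
}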
2024-12-03T05:00:09,979 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T05:00:09,980 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733202009980"}]},"ts":"1733202009980"} 2024-12-03T05:00:09,982 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-12-03T05:00:09,983 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c499cc11a2db027abe112d2371bcab77, ASSIGN}] 2024-12-03T05:00:09,985 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c499cc11a2db027abe112d2371bcab77, ASSIGN 2024-12-03T05:00:09,987 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c499cc11a2db027abe112d2371bcab77, ASSIGN; state=OFFLINE, location=32beccd76b35,37941,1733202007772; forceNewPlan=false, retain=false 2024-12-03T05:00:10,141 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c499cc11a2db027abe112d2371bcab77, regionState=OPENING, regionLocation=32beccd76b35,37941,1733202007772 2024-12-03T05:00:10,145 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c499cc11a2db027abe112d2371bcab77, ASSIGN because future has completed 2024-12-03T05:00:10,162 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c499cc11a2db027abe112d2371bcab77, server=32beccd76b35,37941,1733202007772}] 2024-12-03T05:00:10,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:10,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:10,330 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77. 2024-12-03T05:00:10,330 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c499cc11a2db027abe112d2371bcab77, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77.', STARTKEY => '', ENDKEY => ''} 2024-12-03T05:00:10,342 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling c499cc11a2db027abe112d2371bcab77 2024-12-03T05:00:10,342 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T05:00:10,342 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c499cc11a2db027abe112d2371bcab77 2024-12-03T05:00:10,342 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c499cc11a2db027abe112d2371bcab77 2024-12-03T05:00:10,362 INFO [StoreOpener-c499cc11a2db027abe112d2371bcab77-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c499cc11a2db027abe112d2371bcab77 2024-12-03T05:00:10,369 INFO [StoreOpener-c499cc11a2db027abe112d2371bcab77-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c499cc11a2db027abe112d2371bcab77 columnFamilyName info 2024-12-03T05:00:10,370 DEBUG [StoreOpener-c499cc11a2db027abe112d2371bcab77-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:00:10,371 INFO [StoreOpener-c499cc11a2db027abe112d2371bcab77-1 {}] regionserver.HStore(327): Store=c499cc11a2db027abe112d2371bcab77/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T05:00:10,371 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c499cc11a2db027abe112d2371bcab77 2024-12-03T05:00:10,372 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77 2024-12-03T05:00:10,373 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77 2024-12-03T05:00:10,375 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c499cc11a2db027abe112d2371bcab77 2024-12-03T05:00:10,375 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c499cc11a2db027abe112d2371bcab77 2024-12-03T05:00:10,406 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c499cc11a2db027abe112d2371bcab77 2024-12-03T05:00:10,427 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T05:00:10,427 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened c499cc11a2db027abe112d2371bcab77; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=741310, jitterRate=-0.057375818490982056}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T05:00:10,428 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c499cc11a2db027abe112d2371bcab77 2024-12-03T05:00:10,428 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c499cc11a2db027abe112d2371bcab77: Running coprocessor pre-open hook at 1733202010342Writing region info on filesystem at 1733202010342Initializing all the Stores at 1733202010343 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733202010343Cleaning up temporary data from old regions at 1733202010375 (+32 ms)Running coprocessor post-open hooks at 1733202010428 (+53 ms)Region opened successfully at 1733202010428 2024-12-03T05:00:10,430 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77., pid=6, masterSystemTime=1733202010317 2024-12-03T05:00:10,435 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77. 2024-12-03T05:00:10,436 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77. 2024-12-03T05:00:10,436 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c499cc11a2db027abe112d2371bcab77, regionState=OPEN, openSeqNum=2, regionLocation=32beccd76b35,37941,1733202007772 2024-12-03T05:00:10,441 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c499cc11a2db027abe112d2371bcab77, server=32beccd76b35,37941,1733202007772 because future has completed 2024-12-03T05:00:10,455 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-03T05:00:10,456 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c499cc11a2db027abe112d2371bcab77, server=32beccd76b35,37941,1733202007772 in 290 msec 2024-12-03T05:00:10,459 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-03T05:00:10,459 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c499cc11a2db027abe112d2371bcab77, ASSIGN in 473 msec 2024-12-03T05:00:10,461 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T05:00:10,461 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733202010461"}]},"ts":"1733202010461"} 2024-12-03T05:00:10,465 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-12-03T05:00:10,467 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T05:00:10,469 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, 
hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 611 msec 2024-12-03T05:00:11,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:11,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:12,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:12,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:00:12,833 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-03T05:00:12,833 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-03T05:00:12,834 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T05:00:12,834 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-03T05:00:12,834 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-03T05:00:12,834 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-03T05:00:12,835 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-03T05:00:12,835 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-12-03T05:00:13,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:13,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:14,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:14,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:14,683 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:14,683 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:14,684 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:14,684 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:14,684 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:14,684 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:14,731 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:14,731 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:14,732 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:14,732 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:14,732 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:14,733 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:14,744 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:14,745 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:14,745 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:14,748 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:15,255 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T05:00:15,265 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:15,265 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:15,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:15,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:15,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:15,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:15,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:15,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:15,341 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:15,344 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:15,345 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:15,345 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:15,346 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:15,346 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:15,352 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:15,354 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:15,355 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:15,371 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:00:15,384 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-03T05:00:15,384 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-12-03T05:00:16,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:16,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:17,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:17,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:18,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:18,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:19,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:19,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:00:19,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T05:00:19,931 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-03T05:00:19,931 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-12-03T05:00:19,939 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-03T05:00:19,939 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77. 2024-12-03T05:00:19,943 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77., hostname=32beccd76b35,37941,1733202007772, seqNum=2] 2024-12-03T05:00:19,964 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-03T05:00:19,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-03T05:00:19,975 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-03T05:00:19,976 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T05:00:19,978 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T05:00:19,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42197 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-03T05:00:20,145 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37941 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-03T05:00:20,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77. 
2024-12-03T05:00:20,150 INFO [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing c499cc11a2db027abe112d2371bcab77 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-03T05:00:20,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/.tmp/info/b7449407503f41459571f3b878bba6cb is 1080, key is row0001/info:/1733202019945/Put/seqid=0 2024-12-03T05:00:20,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741837_1013 (size=6033) 2024-12-03T05:00:20,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741837_1013 (size=6033) 2024-12-03T05:00:20,221 INFO [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/.tmp/info/b7449407503f41459571f3b878bba6cb 2024-12-03T05:00:20,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/.tmp/info/b7449407503f41459571f3b878bba6cb as hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/info/b7449407503f41459571f3b878bba6cb 2024-12-03T05:00:20,268 INFO [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/info/b7449407503f41459571f3b878bba6cb, entries=1, sequenceid=5, filesize=5.9 K 2024-12-03T05:00:20,269 INFO [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c499cc11a2db027abe112d2371bcab77 in 119ms, sequenceid=5, compaction requested=false 2024-12-03T05:00:20,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for c499cc11a2db027abe112d2371bcab77: 2024-12-03T05:00:20,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77. 
2024-12-03T05:00:20,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-03T05:00:20,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-03T05:00:20,280 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-03T05:00:20,280 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 298 msec 2024-12-03T05:00:20,284 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 315 msec 2024-12-03T05:00:20,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:00:20,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:21,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:21,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:00:22,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:22,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:23,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:00:23,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:24,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:24,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:00:25,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:25,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:26,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:00:26,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:27,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:27,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:00:28,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:28,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:29,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:00:29,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-03T05:00:30,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-12-03T05:00:30,080 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-03T05:00:30,083 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-03T05:00:30,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-03T05:00:30,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-12-03T05:00:30,085 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-03T05:00:30,086 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-03T05:00:30,086 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-03T05:00:30,239 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37941 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-12-03T05:00:30,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77.
2024-12-03T05:00:30,240 INFO [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing c499cc11a2db027abe112d2371bcab77 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-03T05:00:30,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/.tmp/info/2eb0237368ec4f4c8eb6022198696f79 is 1080, key is row0002/info:/1733202030081/Put/seqid=0
2024-12-03T05:00:30,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741838_1014 (size=6033)
2024-12-03T05:00:30,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741838_1014 (size=6033)
2024-12-03T05:00:30,256 INFO [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/.tmp/info/2eb0237368ec4f4c8eb6022198696f79
2024-12-03T05:00:30,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/.tmp/info/2eb0237368ec4f4c8eb6022198696f79 as hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/info/2eb0237368ec4f4c8eb6022198696f79
2024-12-03T05:00:30,268 INFO [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/info/2eb0237368ec4f4c8eb6022198696f79, entries=1, sequenceid=9, filesize=5.9 K
2024-12-03T05:00:30,270 INFO [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c499cc11a2db027abe112d2371bcab77 in 30ms, sequenceid=9, compaction requested=false
2024-12-03T05:00:30,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for c499cc11a2db027abe112d2371bcab77:
2024-12-03T05:00:30,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77.
2024-12-03T05:00:30,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-12-03T05:00:30,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-12-03T05:00:30,274 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-03T05:00:30,274 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 186 msec 2024-12-03T05:00:30,277 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 192 msec 2024-12-03T05:00:30,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:00:30,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:31,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:31,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 after 68081ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T05:00:31,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:31,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta after 68063ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T05:00:32,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:00:32,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:33,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:33,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:00:34,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:34,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:35,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:00:35,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:36,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:36,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:00:36,835 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T05:00:37,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:37,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:38,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:38,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:39,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:39,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-03T05:00:40,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-12-03T05:00:40,162 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-03T05:00:40,178 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C37941%2C1733202007772.1733202040178
2024-12-03T05:00:40,252 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T05:00:40,252 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T05:00:40,252 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T05:00:40,252 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T05:00:40,252 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T05:00:40,253 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/WALs/32beccd76b35,37941,1733202007772/32beccd76b35%2C37941%2C1733202007772.1733202009101 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/WALs/32beccd76b35,37941,1733202007772/32beccd76b35%2C37941%2C1733202007772.1733202040178
2024-12-03T05:00:40,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741833_1009 (size=5546)
2024-12-03T05:00:40,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741833_1009 (size=5546)
2024-12-03T05:00:40,269 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41323:41323),(127.0.0.1/127.0.0.1:33301:33301)]
2024-12-03T05:00:40,270 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-03T05:00:40,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-03T05:00:40,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-12-03T05:00:40,273 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-03T05:00:40,275 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-03T05:00:40,276 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-03T05:00:40,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-03T05:00:40,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-03T05:00:40,434 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37941 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-12-03T05:00:40,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77.
2024-12-03T05:00:40,442 INFO [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing c499cc11a2db027abe112d2371bcab77 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-03T05:00:40,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/.tmp/info/604b6abd18834308bba5edc4c78be511 is 1080, key is row0003/info:/1733202040165/Put/seqid=0
2024-12-03T05:00:40,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741840_1016 (size=6033)
2024-12-03T05:00:40,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741840_1016 (size=6033)
2024-12-03T05:00:40,551 INFO [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/.tmp/info/604b6abd18834308bba5edc4c78be511
2024-12-03T05:00:40,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/.tmp/info/604b6abd18834308bba5edc4c78be511 as hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/info/604b6abd18834308bba5edc4c78be511
2024-12-03T05:00:40,590 INFO [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/info/604b6abd18834308bba5edc4c78be511, entries=1, sequenceid=13, filesize=5.9 K
2024-12-03T05:00:40,594 INFO [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c499cc11a2db027abe112d2371bcab77 in 153ms, sequenceid=13, compaction requested=true
2024-12-03T05:00:40,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for c499cc11a2db027abe112d2371bcab77:
2024-12-03T05:00:40,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77.
2024-12-03T05:00:40,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-12-03T05:00:40,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-12-03T05:00:40,606 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-12-03T05:00:40,607 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 327 msec
2024-12-03T05:00:40,612 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 338 msec
2024-12-03T05:00:41,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
2024-12-03T05:00:41,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta
2024-12-03T05:00:42,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132
2024-12-03T05:00:42,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta
2024-12-03T05:00:43,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132
2024-12-03T05:00:43,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta
2024-12-03T05:00:44,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132
2024-12-03T05:00:44,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta
2024-12-03T05:00:45,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132
2024-12-03T05:00:45,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta
2024-12-03T05:00:46,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132
2024-12-03T05:00:46,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta
2024-12-03T05:00:47,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132
2024-12-03T05:00:47,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta
2024-12-03T05:00:48,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132
2024-12-03T05:00:48,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta
2024-12-03T05:00:49,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132
2024-12-03T05:00:49,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta
2024-12-03T05:00:49,857 INFO [master/32beccd76b35:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-03T05:00:49,857 INFO [master/32beccd76b35:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-03T05:00:50,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132
2024-12-03T05:00:50,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta
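The repeated warnings above come from the WAL close path polling whether HDFS has closed the old writer; each attempt fails because the underlying DFSClient has already been shut down ("java.io.IOException: Filesystem closed"). As a rough illustration only, the following is a minimal, hypothetical Java sketch of that probe-and-retry pattern: it reflectively calls isFileClosed (only DistributedFileSystem declares it) and retries on failure. It is not the actual RecoverLeaseFSUtils implementation; the WAL path, attempt count, and sleep interval are made up, and only the NameNode address is taken from the log.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbe {

  // Polls FileSystem#isFileClosed(Path) through reflection, since only
  // DistributedFileSystem declares it. Returns true once HDFS reports the file
  // closed, false if the method is missing or every attempt fails.
  static boolean waitUntilClosed(FileSystem fs, Path path, int attempts, long sleepMs)
      throws InterruptedException {
    final Method isFileClosed;
    try {
      isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // this FileSystem implementation cannot answer the question
    }
    for (int i = 0; i < attempts; i++) {
      try {
        if ((Boolean) isFileClosed.invoke(fs, path)) {
          return true;
        }
      } catch (IllegalAccessException | InvocationTargetException e) {
        // Same shape as the WARN entries above: the reflective call failed
        // (there it wrapped "IOException: Filesystem closed"); log and retry.
        System.err.println("Failed invocation for " + path + ": " + e.getCause());
      }
      Thread.sleep(sleepMs);
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode address taken from the log; the WAL path is a hypothetical placeholder.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:38443"), conf);
    Path oldWal = new Path("/hbase/WALs/example-server/example-wal");
    System.out.println("closed=" + waitUntilClosed(fs, oldWal, 10, 1000L));
  }
}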
2024-12-03T05:00:50,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-12-03T05:00:50,352 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-03T05:00:50,352 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-03T05:00:50,359 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-03T05:00:50,360 DEBUG [Time-limited test {}] regionserver.HStore(1541): c499cc11a2db027abe112d2371bcab77/info is initiating minor compaction (all files)
2024-12-03T05:00:50,360 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-03T05:00:50,360 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-03T05:00:50,360 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of c499cc11a2db027abe112d2371bcab77/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77.
2024-12-03T05:00:50,360 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/info/b7449407503f41459571f3b878bba6cb, hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/info/2eb0237368ec4f4c8eb6022198696f79, hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/info/604b6abd18834308bba5edc4c78be511] into tmpdir=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/.tmp, totalSize=17.7 K
2024-12-03T05:00:50,366 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting b7449407503f41459571f3b878bba6cb, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733202019945
2024-12-03T05:00:50,369 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 2eb0237368ec4f4c8eb6022198696f79, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733202030081
2024-12-03T05:00:50,370 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 604b6abd18834308bba5edc4c78be511, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733202040165
2024-12-03T05:00:50,436 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): c499cc11a2db027abe112d2371bcab77#info#compaction#44 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-03T05:00:50,438 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/.tmp/info/a0a95d7258ec45b980562a81cb821de5 is 1080, key is row0001/info:/1733202019945/Put/seqid=0
2024-12-03T05:00:50,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741841_1017 (size=8296)
2024-12-03T05:00:50,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741841_1017 (size=8296)
2024-12-03T05:00:50,528 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/.tmp/info/a0a95d7258ec45b980562a81cb821de5 as hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/info/a0a95d7258ec45b980562a81cb821de5
2024-12-03T05:00:50,542 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c499cc11a2db027abe112d2371bcab77/info of c499cc11a2db027abe112d2371bcab77 into a0a95d7258ec45b980562a81cb821de5(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-03T05:00:50,543 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for c499cc11a2db027abe112d2371bcab77:
2024-12-03T05:00:50,546 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C37941%2C1733202007772.1733202050546
2024-12-03T05:00:50,575 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T05:00:50,575 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T05:00:50,575 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T05:00:50,575 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T05:00:50,575 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-03T05:00:50,576 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/WALs/32beccd76b35,37941,1733202007772/32beccd76b35%2C37941%2C1733202007772.1733202040178 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/WALs/32beccd76b35,37941,1733202007772/32beccd76b35%2C37941%2C1733202007772.1733202050546
2024-12-03T05:00:50,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741839_1015 (size=2520)
2024-12-03T05:00:50,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741839_1015 (size=2520)
2024-12-03T05:00:50,597 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41323:41323),(127.0.0.1/127.0.0.1:33301:33301)]
2024-12-03T05:00:50,601 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/WALs/32beccd76b35,37941,1733202007772/32beccd76b35%2C37941%2C1733202007772.1733202009101 to hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/oldWALs/32beccd76b35%2C37941%2C1733202007772.1733202009101
2024-12-03T05:00:50,603 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-03T05:00:50,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-03T05:00:50,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-12-03T05:00:50,609 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-03T05:00:50,610 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-03T05:00:50,610 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-03T05:00:50,765 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37941 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-12-03T05:00:50,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77.
2024-12-03T05:00:50,766 INFO [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing c499cc11a2db027abe112d2371bcab77 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-03T05:00:50,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/.tmp/info/c258cdb1d0a548d1a939746d3a31505c is 1080, key is row0000/info:/1733202050544/Put/seqid=0
2024-12-03T05:00:50,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741843_1019 (size=6033)
2024-12-03T05:00:50,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741843_1019 (size=6033)
2024-12-03T05:00:50,831 INFO [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/.tmp/info/c258cdb1d0a548d1a939746d3a31505c
2024-12-03T05:00:50,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/.tmp/info/c258cdb1d0a548d1a939746d3a31505c as hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/info/c258cdb1d0a548d1a939746d3a31505c
2024-12-03T05:00:50,847 INFO [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/info/c258cdb1d0a548d1a939746d3a31505c, entries=1, sequenceid=18, filesize=5.9 K
2024-12-03T05:00:50,853 INFO [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c499cc11a2db027abe112d2371bcab77 in 87ms, sequenceid=18, compaction requested=false
2024-12-03T05:00:50,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for c499cc11a2db027abe112d2371bcab77:
2024-12-03T05:00:50,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77.
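The flush above writes the new store file under the region's .tmp directory first and only then commits it into the info store directory (the HRegionFileSystem "Committing ... as ..." line). Below is a minimal, generic HDFS sketch of that write-then-rename pattern; the NameNode address is the one in this log, all directory and file names are hypothetical placeholders, and this is not the actual HRegionFileSystem code.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommit {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // NameNode address taken from the log above; paths below are hypothetical.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:36629"), conf);

    Path tmpFile = new Path("/demo/region/.tmp/info/newfile");
    Path storeFile = new Path("/demo/region/info/newfile");

    // 1. Write the new file under a .tmp directory first.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.writeBytes("flushed cells would be written here");
    }

    // 2. Commit it by renaming into the store directory, mirroring the
    //    "Committing .../.tmp/info/... as .../info/..." step in the log.
    fs.mkdirs(storeFile.getParent());
    if (!fs.rename(tmpFile, storeFile)) {
      throw new IOException("rename failed: " + tmpFile + " -> " + storeFile);
    }
    System.out.println("committed " + storeFile + ", length=" + fs.getFileStatus(storeFile).getLen());
  }
}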
2024-12-03T05:00:50,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-12-03T05:00:50,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-12-03T05:00:50,860 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-12-03T05:00:50,860 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 245 msec
2024-12-03T05:00:50,863 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 258 msec
2024-12-03T05:00:51,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132
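The pid=13/pid=14 flush above was requested by the test client ("Client=jenkins//172.17.0.3 flush ...") and completes once the master-side FlushTableProcedure finishes. As a rough, hedged sketch of issuing the same kind of request through the HBase client API (not the test's actual code), the following assumes an hbase-site.xml for the target cluster is on the classpath; the table name is the one from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Assumes an hbase-site.xml pointing at the cluster is on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
      // Blocks until the master-side flush procedure for the table has finished,
      // which is what the "Operation: FLUSH ... completed" client log line reflects.
      admin.flush(table);
    }
  }
}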
2024-12-03T05:00:51,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta
2024-12-03T05:00:52,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132
2024-12-03T05:00:52,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta
2024-12-03T05:00:53,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132
2024-12-03T05:00:53,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:54,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:00:54,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:55,342 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region c499cc11a2db027abe112d2371bcab77, had cached 0 bytes from a total of 14329 2024-12-03T05:00:55,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:55,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:56,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:56,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:57,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:57,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:58,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:58,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:59,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:00:59,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:00,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:00,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:01:00,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42197 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-03T05:01:00,610 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-03T05:01:00,614 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C37941%2C1733202007772.1733202060614 2024-12-03T05:01:00,626 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:01:00,626 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:01:00,627 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:01:00,627 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:01:00,633 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:01:00,633 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/WALs/32beccd76b35,37941,1733202007772/32beccd76b35%2C37941%2C1733202007772.1733202050546 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/WALs/32beccd76b35,37941,1733202007772/32beccd76b35%2C37941%2C1733202007772.1733202060614 2024-12-03T05:01:00,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741842_1018 (size=2026) 2024-12-03T05:01:00,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741842_1018 (size=2026) 2024-12-03T05:01:00,646 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33301:33301),(127.0.0.1/127.0.0.1:41323:41323)] 2024-12-03T05:01:00,646 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/WALs/32beccd76b35,37941,1733202007772/32beccd76b35%2C37941%2C1733202007772.1733202050546 is not closed yet, will try archiving it next time 2024-12-03T05:01:00,646 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/WALs/32beccd76b35,37941,1733202007772/32beccd76b35%2C37941%2C1733202007772.1733202040178 to hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/oldWALs/32beccd76b35%2C37941%2C1733202007772.1733202040178 2024-12-03T05:01:00,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T05:01:00,646 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
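Note: the recurring "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" WARNs above come from WAL lease recovery probing, roughly once per second, whether each WAL file is closed after the underlying DFSClient has already been shut down; the probe is made reflectively, so the IOException surfaces wrapped in an InvocationTargetException, which is exactly what RecoverLeaseFSUtils(258) keeps reporting. A minimal Java sketch of that reflective probe pattern (an illustrative helper, not the HBase source; it assumes only the public DistributedFileSystem.isFileClosed(Path) API):

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative sketch only: look up isFileClosed reflectively and unwrap the
    // InvocationTargetException that a closed DFSClient produces, as seen in the
    // "Failed invocation ... Filesystem closed" records above.
    public final class IsFileClosedProbe {
      public static boolean isFileClosed(FileSystem fs, Path walFile) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, walFile);
        } catch (NoSuchMethodException e) {
          return false; // filesystem does not expose isFileClosed at all
        } catch (IllegalAccessException | InvocationTargetException e) {
          // e.getCause() carries the IOException ("Filesystem closed" in this log)
          return false;
        }
      }
    }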
2024-12-03T05:01:00,646 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T05:01:00,646 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T05:01:00,647 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
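Note: the "Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed" record above is the client side of the flush procedure whose completion the master reported for pid=13. A minimal sketch of issuing such a flush through the async admin API (illustrative only; the table name is taken from the log, the surrounding boilerplate is assumed):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Illustrative sketch: flush a table via AsyncAdmin and wait for the
    // master-side flush procedure to finish, mirroring the FLUSH records above.
    public final class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
          conn.getAdmin()
              .flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
              .get(); // resolves once the master reports the flush procedure done
        }
      }
    }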
2024-12-03T05:01:00,647 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T05:01:00,647 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T05:01:00,647 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1156108172, stopped=false 2024-12-03T05:01:00,647 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=32beccd76b35,42197,1733202007399 2024-12-03T05:01:00,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T05:01:00,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37941-0x1015176c80d0001, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T05:01:00,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:01:00,649 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37941-0x1015176c80d0001, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:01:00,649 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T05:01:00,650 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T05:01:00,650 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-03T05:01:00,650 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T05:01:00,650 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T05:01:00,651 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '32beccd76b35,37941,1733202007772' ***** 2024-12-03T05:01:00,651 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T05:01:00,651 INFO [RS:0;32beccd76b35:37941 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T05:01:00,651 INFO [RS:0;32beccd76b35:37941 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T05:01:00,651 INFO [RS:0;32beccd76b35:37941 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T05:01:00,651 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T05:01:00,651 INFO [RS:0;32beccd76b35:37941 {}] regionserver.HRegionServer(3091): Received CLOSE for c499cc11a2db027abe112d2371bcab77 2024-12-03T05:01:00,653 INFO [RS:0;32beccd76b35:37941 {}] regionserver.HRegionServer(959): stopping server 32beccd76b35,37941,1733202007772 2024-12-03T05:01:00,653 INFO [RS:0;32beccd76b35:37941 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T05:01:00,653 INFO [RS:0;32beccd76b35:37941 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;32beccd76b35:37941. 2024-12-03T05:01:00,653 DEBUG [RS:0;32beccd76b35:37941 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T05:01:00,653 DEBUG [RS:0;32beccd76b35:37941 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T05:01:00,653 INFO [RS:0;32beccd76b35:37941 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T05:01:00,653 INFO [RS:0;32beccd76b35:37941 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T05:01:00,654 INFO [RS:0;32beccd76b35:37941 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
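Note: the shutdown call stacks above all bottom out in AbstractTestLogRolling.tearDown() calling HBaseTestingUtil.shutdownMiniCluster(), which is what drives the "Shutting down minicluster", "STOPPING region server" and region-close records around this point. A minimal sketch of that test lifecycle (class and field names are illustrative, not the actual test source):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;

    // Illustrative sketch of the mini-cluster lifecycle visible in this log's
    // tearDown call stacks; not the real AbstractTestLogRolling class.
    public class MiniClusterLifecycleSketch {
      private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        testUtil.startMiniCluster(); // brings up HDFS, ZooKeeper and HBase for the test
      }

      @After
      public void tearDown() throws Exception {
        testUtil.shutdownMiniCluster(); // produces the shutdown records seen here
      }
    }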
2024-12-03T05:01:00,654 INFO [RS:0;32beccd76b35:37941 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T05:01:00,654 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c499cc11a2db027abe112d2371bcab77, disabling compactions & flushes 2024-12-03T05:01:00,654 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77. 2024-12-03T05:01:00,654 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77. 2024-12-03T05:01:00,654 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77. after waiting 0 ms 2024-12-03T05:01:00,654 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37941-0x1015176c80d0001, quorum=127.0.0.1:53408, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T05:01:00,654 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77. 2024-12-03T05:01:00,654 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing c499cc11a2db027abe112d2371bcab77 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-03T05:01:00,657 INFO [RS:0;32beccd76b35:37941 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-03T05:01:00,657 DEBUG [RS:0;32beccd76b35:37941 {}] regionserver.HRegionServer(1325): Online Regions={c499cc11a2db027abe112d2371bcab77=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77., 1588230740=hbase:meta,,1.1588230740} 2024-12-03T05:01:00,657 DEBUG [RS:0;32beccd76b35:37941 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, c499cc11a2db027abe112d2371bcab77 2024-12-03T05:01:00,658 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T05:01:00,658 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T05:01:00,658 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T05:01:00,658 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T05:01:00,658 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T05:01:00,658 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB 
heapSize=3.91 KB 2024-12-03T05:01:00,659 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/.tmp/info/c01df47aadcc459c88c27956419c35cc is 1080, key is row0001/info:/1733202060612/Put/seqid=0 2024-12-03T05:01:00,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741845_1021 (size=6033) 2024-12-03T05:01:00,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741845_1021 (size=6033) 2024-12-03T05:01:00,666 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/.tmp/info/c01df47aadcc459c88c27956419c35cc 2024-12-03T05:01:00,677 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/.tmp/info/c01df47aadcc459c88c27956419c35cc as hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/info/c01df47aadcc459c88c27956419c35cc 2024-12-03T05:01:00,682 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/hbase/meta/1588230740/.tmp/info/3bf1720550c44527bb2a9670fbc84a12 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77./info:regioninfo/1733202010436/Put/seqid=0 2024-12-03T05:01:00,683 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/info/c01df47aadcc459c88c27956419c35cc, entries=1, sequenceid=22, filesize=5.9 K 2024-12-03T05:01:00,684 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c499cc11a2db027abe112d2371bcab77 in 30ms, sequenceid=22, compaction requested=true 2024-12-03T05:01:00,692 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/info/b7449407503f41459571f3b878bba6cb, 
hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/info/2eb0237368ec4f4c8eb6022198696f79, hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/info/604b6abd18834308bba5edc4c78be511] to archive 2024-12-03T05:01:00,694 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-03T05:01:00,696 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/info/b7449407503f41459571f3b878bba6cb to hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/info/b7449407503f41459571f3b878bba6cb 2024-12-03T05:01:00,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741846_1022 (size=7308) 2024-12-03T05:01:00,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741846_1022 (size=7308) 2024-12-03T05:01:00,698 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/hbase/meta/1588230740/.tmp/info/3bf1720550c44527bb2a9670fbc84a12 2024-12-03T05:01:00,698 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/info/2eb0237368ec4f4c8eb6022198696f79 to hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/info/2eb0237368ec4f4c8eb6022198696f79 2024-12-03T05:01:00,700 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/info/604b6abd18834308bba5edc4c78be511 to hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/info/604b6abd18834308bba5edc4c78be511 2024-12-03T05:01:00,701 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77.-1 {}] 
regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=32beccd76b35:42197 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-12-03T05:01:00,701 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [b7449407503f41459571f3b878bba6cb=6033, 2eb0237368ec4f4c8eb6022198696f79=6033, 604b6abd18834308bba5edc4c78be511=6033] 2024-12-03T05:01:00,729 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c499cc11a2db027abe112d2371bcab77/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-12-03T05:01:00,730 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77. 2024-12-03T05:01:00,730 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c499cc11a2db027abe112d2371bcab77: Waiting for close lock at 1733202060654Running coprocessor pre-close hooks at 1733202060654Disabling compacts and flushes for region at 1733202060654Disabling writes for close at 1733202060654Obtaining lock to block concurrent updates at 1733202060654Preparing flush snapshotting stores in c499cc11a2db027abe112d2371bcab77 at 1733202060654Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733202060655 (+1 ms)Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77. at 1733202060655Flushing c499cc11a2db027abe112d2371bcab77/info: creating writer at 1733202060656 (+1 ms)Flushing c499cc11a2db027abe112d2371bcab77/info: appending metadata at 1733202060659 (+3 ms)Flushing c499cc11a2db027abe112d2371bcab77/info: closing flushed file at 1733202060659Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16584c2d: reopening flushed file at 1733202060676 (+17 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c499cc11a2db027abe112d2371bcab77 in 30ms, sequenceid=22, compaction requested=true at 1733202060684 (+8 ms)Writing region close event to WAL at 1733202060713 (+29 ms)Running coprocessor post-close hooks at 1733202060730 (+17 ms)Closed at 1733202060730 2024-12-03T05:01:00,730 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733202009855.c499cc11a2db027abe112d2371bcab77. 
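The flush and close recorded above follow a write-to-temp-then-rename pattern: the new HFile is written under the region's .tmp directory, then committed into the column-family directory with a rename, and the compacted-away store files are moved to the archive directory rather than deleted. The sketch below shows that commit pattern with the plain Hadoop FileSystem API; the paths are hypothetical and this is not HBase's actual HRegionFileSystem/HFileArchiver code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenRenameSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Hypothetical layout: the flushed file is first written under .tmp (write not shown),
        // so readers of the store directory never observe a partially written HFile.
        Path tmpFile  = new Path("/data/default/T/region/.tmp/info/flushed-hfile");
        Path liveFile = new Path("/data/default/T/region/info/flushed-hfile");
        Path archived = new Path("/archive/data/default/T/region/info/old-hfile");

        // Commit: a single rename makes the flushed file visible in the store.
        if (!fs.rename(tmpFile, liveFile)) {
          throw new java.io.IOException("commit rename failed for " + tmpFile);
        }
        // Superseded (compacted-away) files are archived instead of deleted,
        // which is what the HFileArchiver entries above record.
        fs.mkdirs(archived.getParent());
        fs.rename(new Path("/data/default/T/region/info/old-hfile"), archived);
      }
    }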
2024-12-03T05:01:00,732 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/hbase/meta/1588230740/.tmp/ns/a3bfc0870e9d447ab4ea6f22e6dafb34 is 43, key is default/ns:d/1733202009740/Put/seqid=0 2024-12-03T05:01:00,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741847_1023 (size=5153) 2024-12-03T05:01:00,752 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/hbase/meta/1588230740/.tmp/ns/a3bfc0870e9d447ab4ea6f22e6dafb34 2024-12-03T05:01:00,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741847_1023 (size=5153) 2024-12-03T05:01:00,793 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/hbase/meta/1588230740/.tmp/table/09938e96188443b2a2e0b50e7b5158cc is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733202010461/Put/seqid=0 2024-12-03T05:01:00,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741848_1024 (size=5508) 2024-12-03T05:01:00,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741848_1024 (size=5508) 2024-12-03T05:01:00,857 DEBUG [RS:0;32beccd76b35:37941 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-03T05:01:00,866 INFO [regionserver/32beccd76b35:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-03T05:01:00,866 INFO [regionserver/32beccd76b35:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-03T05:01:00,887 INFO [regionserver/32beccd76b35:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T05:01:01,041 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/WALs/32beccd76b35,37941,1733202007772/32beccd76b35%2C37941%2C1733202007772.1733202050546 to hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/oldWALs/32beccd76b35%2C37941%2C1733202007772.1733202050546 2024-12-03T05:01:01,058 DEBUG [RS:0;32beccd76b35:37941 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-03T05:01:01,219 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/hbase/meta/1588230740/.tmp/table/09938e96188443b2a2e0b50e7b5158cc 2024-12-03T05:01:01,227 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/hbase/meta/1588230740/.tmp/info/3bf1720550c44527bb2a9670fbc84a12 as 
hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/hbase/meta/1588230740/info/3bf1720550c44527bb2a9670fbc84a12 2024-12-03T05:01:01,232 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/hbase/meta/1588230740/info/3bf1720550c44527bb2a9670fbc84a12, entries=10, sequenceid=11, filesize=7.1 K 2024-12-03T05:01:01,233 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/hbase/meta/1588230740/.tmp/ns/a3bfc0870e9d447ab4ea6f22e6dafb34 as hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/hbase/meta/1588230740/ns/a3bfc0870e9d447ab4ea6f22e6dafb34 2024-12-03T05:01:01,239 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/hbase/meta/1588230740/ns/a3bfc0870e9d447ab4ea6f22e6dafb34, entries=2, sequenceid=11, filesize=5.0 K 2024-12-03T05:01:01,240 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/hbase/meta/1588230740/.tmp/table/09938e96188443b2a2e0b50e7b5158cc as hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/hbase/meta/1588230740/table/09938e96188443b2a2e0b50e7b5158cc 2024-12-03T05:01:01,245 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/hbase/meta/1588230740/table/09938e96188443b2a2e0b50e7b5158cc, entries=2, sequenceid=11, filesize=5.4 K 2024-12-03T05:01:01,247 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 589ms, sequenceid=11, compaction requested=false 2024-12-03T05:01:01,257 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-03T05:01:01,258 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T05:01:01,258 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T05:01:01,258 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733202060657Running coprocessor pre-close hooks at 1733202060657Disabling compacts and flushes for region at 1733202060657Disabling writes for close at 1733202060658 (+1 ms)Obtaining lock to block concurrent updates at 1733202060658Preparing flush snapshotting stores in 1588230740 at 
1733202060658Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1733202060658Flushing stores of hbase:meta,,1.1588230740 at 1733202060659 (+1 ms)Flushing 1588230740/info: creating writer at 1733202060659Flushing 1588230740/info: appending metadata at 1733202060681 (+22 ms)Flushing 1588230740/info: closing flushed file at 1733202060681Flushing 1588230740/ns: creating writer at 1733202060709 (+28 ms)Flushing 1588230740/ns: appending metadata at 1733202060732 (+23 ms)Flushing 1588230740/ns: closing flushed file at 1733202060732Flushing 1588230740/table: creating writer at 1733202060763 (+31 ms)Flushing 1588230740/table: appending metadata at 1733202060793 (+30 ms)Flushing 1588230740/table: closing flushed file at 1733202060793Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@9ddaca5: reopening flushed file at 1733202061226 (+433 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1666b23: reopening flushed file at 1733202061232 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7f7644: reopening flushed file at 1733202061239 (+7 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 589ms, sequenceid=11, compaction requested=false at 1733202061247 (+8 ms)Writing region close event to WAL at 1733202061253 (+6 ms)Running coprocessor post-close hooks at 1733202061258 (+5 ms)Closed at 1733202061258 2024-12-03T05:01:01,258 DEBUG [RS:0;32beccd76b35:37941 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-03T05:01:01,258 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T05:01:01,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:01,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:01,458 INFO [RS:0;32beccd76b35:37941 {}] regionserver.HRegionServer(976): stopping server 32beccd76b35,37941,1733202007772; all regions closed. 
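The two WARNs above come from WAL lease recovery: RecoverLeaseFSUtils probes whether the old writer's file is already closed by invoking isFileClosed reflectively, and here the probe fails only because the test's DFSClient has already been shut down ("Filesystem closed"). Below is a hedged sketch of that reflective probe against a FileSystem handle; it mirrors the call chain in the stack traces but is not the actual RecoverLeaseFSUtils implementation.

    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class IsFileClosedProbe {
      // The method is looked up reflectively so the caller still compiles against
      // FileSystem implementations that do not expose isFileClosed(Path).
      static boolean isFileClosed(FileSystem fs, Path path) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, path);
        } catch (Exception e) {
          // An InvocationTargetException wrapping "Filesystem closed" is exactly the
          // failure logged above; report "not known to be closed" and let the caller retry.
          return false;
        }
      }
    }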
2024-12-03T05:01:01,459 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:01:01,459 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:01:01,459 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:01:01,459 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:01:01,459 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:01:01,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741834_1010 (size=3306) 2024-12-03T05:01:01,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741834_1010 (size=3306) 2024-12-03T05:01:01,865 DEBUG [RS:0;32beccd76b35:37941 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/oldWALs 2024-12-03T05:01:01,865 INFO [RS:0;32beccd76b35:37941 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 32beccd76b35%2C37941%2C1733202007772.meta:.meta(num 1733202009527) 2024-12-03T05:01:01,866 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:01:01,866 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:01:01,866 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:01:01,866 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:01:01,866 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:01:01,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741844_1020 (size=1252) 2024-12-03T05:01:01,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741844_1020 (size=1252) 2024-12-03T05:01:01,874 DEBUG [RS:0;32beccd76b35:37941 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/oldWALs 2024-12-03T05:01:01,874 INFO [RS:0;32beccd76b35:37941 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 32beccd76b35%2C37941%2C1733202007772:(num 1733202060614) 2024-12-03T05:01:01,874 DEBUG [RS:0;32beccd76b35:37941 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T05:01:01,874 INFO [RS:0;32beccd76b35:37941 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T05:01:01,874 INFO [RS:0;32beccd76b35:37941 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T05:01:01,874 INFO [RS:0;32beccd76b35:37941 {}] hbase.ChoreService(370): Chore service for: regionserver/32beccd76b35:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-03T05:01:01,874 INFO [RS:0;32beccd76b35:37941 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T05:01:01,874 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T05:01:01,875 INFO [RS:0;32beccd76b35:37941 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:37941 2024-12-03T05:01:01,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T05:01:01,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37941-0x1015176c80d0001, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/32beccd76b35,37941,1733202007772 2024-12-03T05:01:01,877 INFO [RS:0;32beccd76b35:37941 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T05:01:01,879 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [32beccd76b35,37941,1733202007772] 2024-12-03T05:01:01,881 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/32beccd76b35,37941,1733202007772 already deleted, retry=false 2024-12-03T05:01:01,881 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 32beccd76b35,37941,1733202007772 expired; onlineServers=0 2024-12-03T05:01:01,881 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '32beccd76b35,42197,1733202007399' ***** 2024-12-03T05:01:01,881 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T05:01:01,881 INFO [M:0;32beccd76b35:42197 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T05:01:01,881 INFO [M:0;32beccd76b35:42197 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T05:01:01,881 DEBUG [M:0;32beccd76b35:42197 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T05:01:01,881 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
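The ZooKeeper traffic above is the master learning of the region server's exit: the server's ephemeral znode under /hbase/rs disappears, the master's watcher receives NodeDeleted/NodeChildrenChanged, RegionServerTracker processes the expiration, and with cluster shutdown already set the master begins stopping itself. The sketch below shows the underlying ephemeral-node watch with the plain ZooKeeper client; the quorum string and znode path are placeholders and session/error handling is omitted.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RsLivenessWatchSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });
        String rsZnode = "/hbase/rs/example-server,16020,1733202007772";   // hypothetical path
        Watcher deletionWatcher = (WatchedEvent event) -> {
          if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
            // The server's session ended (clean stop or crash); start expiration handling.
            System.out.println("Region server znode deleted: " + event.getPath());
          }
        };
        // exists() both checks the node and arms a one-shot watch on it.
        zk.exists(rsZnode, deletionWatcher);
        Thread.sleep(Long.MAX_VALUE);   // keep the demo alive long enough to see the event
      }
    }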
2024-12-03T05:01:01,881 DEBUG [master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.large.0-1733202008737 {}] cleaner.HFileCleaner(306): Exit Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.large.0-1733202008737,5,FailOnTimeoutGroup] 2024-12-03T05:01:01,881 DEBUG [master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.small.0-1733202008745 {}] cleaner.HFileCleaner(306): Exit Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.small.0-1733202008745,5,FailOnTimeoutGroup] 2024-12-03T05:01:01,881 DEBUG [M:0;32beccd76b35:42197 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T05:01:01,881 INFO [M:0;32beccd76b35:42197 {}] hbase.ChoreService(370): Chore service for: master/32beccd76b35:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T05:01:01,881 INFO [M:0;32beccd76b35:42197 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T05:01:01,881 DEBUG [M:0;32beccd76b35:42197 {}] master.HMaster(1795): Stopping service threads 2024-12-03T05:01:01,881 INFO [M:0;32beccd76b35:42197 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T05:01:01,881 INFO [M:0;32beccd76b35:42197 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T05:01:01,882 INFO [M:0;32beccd76b35:42197 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T05:01:01,882 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T05:01:01,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T05:01:01,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:01:01,883 DEBUG [M:0;32beccd76b35:42197 {}] zookeeper.ZKUtil(347): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-03T05:01:01,883 WARN [M:0;32beccd76b35:42197 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-03T05:01:01,884 INFO [M:0;32beccd76b35:42197 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/.lastflushedseqids 2024-12-03T05:01:01,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741849_1025 (size=130) 2024-12-03T05:01:01,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741849_1025 (size=130) 2024-12-03T05:01:01,898 INFO [M:0;32beccd76b35:42197 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-03T05:01:01,898 INFO [M:0;32beccd76b35:42197 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T05:01:01,898 DEBUG [M:0;32beccd76b35:42197 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T05:01:01,898 INFO [M:0;32beccd76b35:42197 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T05:01:01,898 DEBUG [M:0;32beccd76b35:42197 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T05:01:01,898 DEBUG [M:0;32beccd76b35:42197 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T05:01:01,898 DEBUG [M:0;32beccd76b35:42197 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T05:01:01,898 INFO [M:0;32beccd76b35:42197 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.62 KB heapSize=55.02 KB 2024-12-03T05:01:01,915 DEBUG [M:0;32beccd76b35:42197 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3ee740af951749088ed3ea73dd79e4ac is 82, key is hbase:meta,,1/info:regioninfo/1733202009715/Put/seqid=0 2024-12-03T05:01:01,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741850_1026 (size=5672) 2024-12-03T05:01:01,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741850_1026 (size=5672) 2024-12-03T05:01:01,920 INFO [M:0;32beccd76b35:42197 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3ee740af951749088ed3ea73dd79e4ac 2024-12-03T05:01:01,954 DEBUG [M:0;32beccd76b35:42197 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a4504771268e4c7f87753d7a45178f11 is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733202010468/Put/seqid=0 2024-12-03T05:01:01,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741851_1027 (size=7826) 2024-12-03T05:01:01,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741851_1027 (size=7826) 2024-12-03T05:01:01,964 INFO [M:0;32beccd76b35:42197 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.02 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a4504771268e4c7f87753d7a45178f11 2024-12-03T05:01:01,969 INFO [M:0;32beccd76b35:42197 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a4504771268e4c7f87753d7a45178f11 2024-12-03T05:01:01,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37941-0x1015176c80d0001, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T05:01:01,979 INFO 
[RS:0;32beccd76b35:37941 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T05:01:01,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37941-0x1015176c80d0001, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T05:01:01,979 INFO [RS:0;32beccd76b35:37941 {}] regionserver.HRegionServer(1031): Exiting; stopping=32beccd76b35,37941,1733202007772; zookeeper connection closed. 2024-12-03T05:01:01,979 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@237a36f3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@237a36f3 2024-12-03T05:01:01,980 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-03T05:01:01,986 DEBUG [M:0;32beccd76b35:42197 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8df89ec7eed54330a029847936646be7 is 69, key is 32beccd76b35,37941,1733202007772/rs:state/1733202008791/Put/seqid=0 2024-12-03T05:01:01,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741852_1028 (size=5156) 2024-12-03T05:01:01,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741852_1028 (size=5156) 2024-12-03T05:01:01,998 INFO [M:0;32beccd76b35:42197 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8df89ec7eed54330a029847936646be7 2024-12-03T05:01:02,019 DEBUG [M:0;32beccd76b35:42197 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/239822d45d524eff9aa49436d0eb2c43 is 52, key is load_balancer_on/state:d/1733202009849/Put/seqid=0 2024-12-03T05:01:02,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741853_1029 (size=5056) 2024-12-03T05:01:02,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741853_1029 (size=5056) 2024-12-03T05:01:02,027 INFO [M:0;32beccd76b35:42197 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/239822d45d524eff9aa49436d0eb2c43 2024-12-03T05:01:02,033 DEBUG [M:0;32beccd76b35:42197 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3ee740af951749088ed3ea73dd79e4ac as hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3ee740af951749088ed3ea73dd79e4ac 2024-12-03T05:01:02,038 INFO [M:0;32beccd76b35:42197 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3ee740af951749088ed3ea73dd79e4ac, entries=8, sequenceid=121, filesize=5.5 K 2024-12-03T05:01:02,042 DEBUG [M:0;32beccd76b35:42197 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a4504771268e4c7f87753d7a45178f11 as hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a4504771268e4c7f87753d7a45178f11 2024-12-03T05:01:02,047 INFO [M:0;32beccd76b35:42197 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a4504771268e4c7f87753d7a45178f11 2024-12-03T05:01:02,047 INFO [M:0;32beccd76b35:42197 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a4504771268e4c7f87753d7a45178f11, entries=14, sequenceid=121, filesize=7.6 K 2024-12-03T05:01:02,053 DEBUG [M:0;32beccd76b35:42197 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8df89ec7eed54330a029847936646be7 as hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8df89ec7eed54330a029847936646be7 2024-12-03T05:01:02,061 INFO [M:0;32beccd76b35:42197 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8df89ec7eed54330a029847936646be7, entries=1, sequenceid=121, filesize=5.0 K 2024-12-03T05:01:02,062 DEBUG [M:0;32beccd76b35:42197 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/239822d45d524eff9aa49436d0eb2c43 as hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/239822d45d524eff9aa49436d0eb2c43 2024-12-03T05:01:02,067 INFO [M:0;32beccd76b35:42197 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36629/user/jenkins/test-data/c69ad5d4-3dd5-50f7-a58b-e3fcfc80f9b9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/239822d45d524eff9aa49436d0eb2c43, entries=1, sequenceid=121, filesize=4.9 K 2024-12-03T05:01:02,068 INFO [M:0;32beccd76b35:42197 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.62 KB/44668, heapSize ~54.96 KB/56280, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 170ms, sequenceid=121, compaction requested=false 2024-12-03T05:01:02,073 INFO [M:0;32beccd76b35:42197 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T05:01:02,073 DEBUG [M:0;32beccd76b35:42197 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733202061898Disabling compacts and flushes for region at 1733202061898Disabling writes for close at 1733202061898Obtaining lock to block concurrent updates at 1733202061898Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733202061898Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44668, getHeapSize=56280, getOffHeapSize=0, getCellsCount=140 at 1733202061899 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733202061899Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733202061899Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733202061914 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733202061914Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733202061926 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733202061954 (+28 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733202061954Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733202061970 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733202061985 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733202061985Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733202062003 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733202062019 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733202062019Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79757f4: reopening flushed file at 1733202062032 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@25cc2d5c: reopening flushed file at 1733202062038 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f6e61bd: reopening flushed file at 1733202062047 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40c23e73: reopening flushed file at 1733202062062 (+15 ms)Finished flush of dataSize ~43.62 KB/44668, heapSize ~54.96 KB/56280, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 170ms, sequenceid=121, compaction requested=false at 1733202062068 (+6 ms)Writing region close event to WAL at 1733202062073 (+5 ms)Closed at 1733202062073 2024-12-03T05:01:02,074 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:01:02,074 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:01:02,074 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:01:02,074 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:01:02,074 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:01:02,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37069 is added to blk_1073741830_1006 (size=53065) 2024-12-03T05:01:02,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35113 is added to blk_1073741830_1006 (size=53065) 2024-12-03T05:01:02,077 INFO [M:0;32beccd76b35:42197 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-03T05:01:02,077 INFO [M:0;32beccd76b35:42197 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42197 2024-12-03T05:01:02,078 INFO [M:0;32beccd76b35:42197 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T05:01:02,078 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-03T05:01:02,180 INFO [M:0;32beccd76b35:42197 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T05:01:02,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T05:01:02,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42197-0x1015176c80d0000, quorum=127.0.0.1:53408, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T05:01:02,183 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@61122961{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T05:01:02,183 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@15f0a5cf{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T05:01:02,183 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T05:01:02,183 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27dc1529{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T05:01:02,183 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5b8dfaa1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/hadoop.log.dir/,STOPPED} 2024-12-03T05:01:02,185 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T05:01:02,185 WARN [BP-1614527077-172.17.0.3-1733202005879 heartbeating to localhost/127.0.0.1:36629 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T05:01:02,186 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T05:01:02,186 WARN [BP-1614527077-172.17.0.3-1733202005879 heartbeating to localhost/127.0.0.1:36629 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1614527077-172.17.0.3-1733202005879 (Datanode Uuid 4562c1a3-be6e-41b7-b75c-6031be9c107d) service to localhost/127.0.0.1:36629 2024-12-03T05:01:02,186 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/cluster_90211db9-1009-0dcc-f315-1af3237589ae/data/data3/current/BP-1614527077-172.17.0.3-1733202005879 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T05:01:02,187 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/cluster_90211db9-1009-0dcc-f315-1af3237589ae/data/data4/current/BP-1614527077-172.17.0.3-1733202005879 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T05:01:02,187 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T05:01:02,190 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55e190a0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T05:01:02,190 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@cad26ca{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T05:01:02,191 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T05:01:02,191 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ad8fe14{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T05:01:02,191 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e21aa83{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/hadoop.log.dir/,STOPPED} 2024-12-03T05:01:02,193 WARN [BP-1614527077-172.17.0.3-1733202005879 heartbeating to localhost/127.0.0.1:36629 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T05:01:02,193 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
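Once the minicluster and MiniZK are down, the ResourceChecker entry that follows compares the JVM's thread count after the test with the count taken before it (Thread=206 (was 180)) and dumps the stacks of the leftover, potentially hanging threads. The sketch below shows the JDK primitive behind such a report, Thread.getAllStackTraces(); the filtering is illustrative and not HBase's actual ResourceChecker logic.

    import java.util.Map;

    public class ThreadLeakReportSketch {
      public static void main(String[] args) {
        // Snapshot every live thread together with its current stack trace.
        Map<Thread, StackTraceElement[]> stacks = Thread.getAllStackTraces();
        System.out.println("Live threads: " + stacks.size());
        for (Map.Entry<Thread, StackTraceElement[]> e : stacks.entrySet()) {
          Thread t = e.getKey();
          if (t.isDaemon() || t == Thread.currentThread()) {
            continue;   // a real checker would instead diff against a pre-test snapshot
          }
          System.out.println("Potentially hanging thread: " + t.getName());
          for (StackTraceElement frame : e.getValue()) {
            System.out.println("    " + frame);
          }
        }
      }
    }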
2024-12-03T05:01:02,193 WARN [BP-1614527077-172.17.0.3-1733202005879 heartbeating to localhost/127.0.0.1:36629 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1614527077-172.17.0.3-1733202005879 (Datanode Uuid 868b60b5-14e2-47a9-9978-49b6406460c8) service to localhost/127.0.0.1:36629 2024-12-03T05:01:02,193 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T05:01:02,193 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/cluster_90211db9-1009-0dcc-f315-1af3237589ae/data/data1/current/BP-1614527077-172.17.0.3-1733202005879 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T05:01:02,194 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/cluster_90211db9-1009-0dcc-f315-1af3237589ae/data/data2/current/BP-1614527077-172.17.0.3-1733202005879 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T05:01:02,194 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T05:01:02,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@41398064{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T05:01:02,201 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1411d2ea{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T05:01:02,201 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T05:01:02,201 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26880a4e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T05:01:02,201 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a686da4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/hadoop.log.dir/,STOPPED} 2024-12-03T05:01:02,208 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-03T05:01:02,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-03T05:01:02,235 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=206 (was 180) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1647223992) connection to localhost/127.0.0.1:36629 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1647223992) connection to localhost/127.0.0.1:36629 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36629 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36629 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36629 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:36629 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36629 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36629 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1647223992) connection to localhost/127.0.0.1:36629 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=821 (was 912), ProcessCount=11 (was 11), AvailableMemoryMB=4157 (was 3529) - AvailableMemoryMB LEAK? - 2024-12-03T05:01:02,244 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=205, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=821, ProcessCount=11, AvailableMemoryMB=4156 2024-12-03T05:01:02,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T05:01:02,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/hadoop.log.dir so I do NOT create it in target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461 2024-12-03T05:01:02,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/575e4e31-8702-b6a4-1130-0fad9a7b1bcf/hadoop.tmp.dir so I do NOT create it in target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461 2024-12-03T05:01:02,244 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/cluster_bd6a4643-fcaa-14d8-80d1-ab69445c54a4, deleteOnExit=true 2024-12-03T05:01:02,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-03T05:01:02,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/test.cache.data in system properties and HBase conf 2024-12-03T05:01:02,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T05:01:02,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/hadoop.log.dir in system properties and HBase conf 2024-12-03T05:01:02,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T05:01:02,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T05:01:02,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-03T05:01:02,245 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-03T05:01:02,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T05:01:02,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T05:01:02,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T05:01:02,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T05:01:02,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T05:01:02,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T05:01:02,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T05:01:02,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T05:01:02,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T05:01:02,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/nfs.dump.dir in system properties and HBase conf 2024-12-03T05:01:02,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/java.io.tmpdir in system properties and HBase conf 2024-12-03T05:01:02,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T05:01:02,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T05:01:02,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T05:01:02,258 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T05:01:02,332 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T05:01:02,337 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T05:01:02,338 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T05:01:02,338 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T05:01:02,338 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T05:01:02,339 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T05:01:02,339 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8e75f7d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/hadoop.log.dir/,AVAILABLE} 2024-12-03T05:01:02,339 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2721c5f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T05:01:02,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:02,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:02,458 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6183d646{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/java.io.tmpdir/jetty-localhost-37853-hadoop-hdfs-3_4_1-tests_jar-_-any-8554645163093482518/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T05:01:02,459 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@12849da8{HTTP/1.1, (http/1.1)}{localhost:37853} 2024-12-03T05:01:02,459 INFO [Time-limited test {}] server.Server(415): Started @246621ms 2024-12-03T05:01:02,475 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T05:01:02,550 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T05:01:02,554 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T05:01:02,555 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T05:01:02,555 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T05:01:02,555 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T05:01:02,555 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3743fb08{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/hadoop.log.dir/,AVAILABLE} 2024-12-03T05:01:02,556 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@d27641d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T05:01:02,712 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@c45f3c2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/java.io.tmpdir/jetty-localhost-36355-hadoop-hdfs-3_4_1-tests_jar-_-any-6616673583158299247/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T05:01:02,713 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5dd84d20{HTTP/1.1, (http/1.1)}{localhost:36355} 2024-12-03T05:01:02,713 INFO [Time-limited test {}] server.Server(415): Started @246875ms 2024-12-03T05:01:02,714 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T05:01:02,756 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T05:01:02,759 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T05:01:02,764 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T05:01:02,764 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T05:01:02,764 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T05:01:02,766 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56dad49a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/hadoop.log.dir/,AVAILABLE} 2024-12-03T05:01:02,766 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d89d07b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T05:01:02,817 WARN [Thread-1943 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/cluster_bd6a4643-fcaa-14d8-80d1-ab69445c54a4/data/data1/current/BP-1385824722-172.17.0.3-1733202062277/current, will proceed with Du for space computation calculation, 2024-12-03T05:01:02,817 WARN [Thread-1944 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/cluster_bd6a4643-fcaa-14d8-80d1-ab69445c54a4/data/data2/current/BP-1385824722-172.17.0.3-1733202062277/current, will proceed with Du for space computation calculation, 2024-12-03T05:01:02,833 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T05:01:02,833 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-03T05:01:02,834 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-03T05:01:02,834 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-03T05:01:02,842 WARN [Thread-1922 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T05:01:02,845 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x152cb296a34645d5 with lease ID 0x35d7ab08d69f18e5: Processing first storage report for DS-a25a0da4-37ce-413b-a38e-b356cb524ca9 from datanode DatanodeRegistration(127.0.0.1:36125, datanodeUuid=5c35fb61-e1e7-4949-a6a0-9bfbf53ce13f, infoPort=42049, infoSecurePort=0, ipcPort=33523, storageInfo=lv=-57;cid=testClusterID;nsid=1054605898;c=1733202062277) 2024-12-03T05:01:02,845 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x152cb296a34645d5 with lease ID 0x35d7ab08d69f18e5: from storage DS-a25a0da4-37ce-413b-a38e-b356cb524ca9 node DatanodeRegistration(127.0.0.1:36125, datanodeUuid=5c35fb61-e1e7-4949-a6a0-9bfbf53ce13f, infoPort=42049, infoSecurePort=0, ipcPort=33523, storageInfo=lv=-57;cid=testClusterID;nsid=1054605898;c=1733202062277), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T05:01:02,845 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x152cb296a34645d5 with lease ID 0x35d7ab08d69f18e5: Processing first storage report for DS-41f03bee-5380-49c3-af00-1126391cc8b6 from datanode DatanodeRegistration(127.0.0.1:36125, datanodeUuid=5c35fb61-e1e7-4949-a6a0-9bfbf53ce13f, infoPort=42049, infoSecurePort=0, ipcPort=33523, storageInfo=lv=-57;cid=testClusterID;nsid=1054605898;c=1733202062277) 2024-12-03T05:01:02,845 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x152cb296a34645d5 with lease ID 0x35d7ab08d69f18e5: from storage DS-41f03bee-5380-49c3-af00-1126391cc8b6 node DatanodeRegistration(127.0.0.1:36125, datanodeUuid=5c35fb61-e1e7-4949-a6a0-9bfbf53ce13f, infoPort=42049, infoSecurePort=0, ipcPort=33523, storageInfo=lv=-57;cid=testClusterID;nsid=1054605898;c=1733202062277), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T05:01:02,933 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@432aed61{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/java.io.tmpdir/jetty-localhost-36843-hadoop-hdfs-3_4_1-tests_jar-_-any-913476412045335965/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T05:01:02,935 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5f29755b{HTTP/1.1, (http/1.1)}{localhost:36843} 2024-12-03T05:01:02,935 INFO [Time-limited test {}] server.Server(415): Started @247097ms 2024-12-03T05:01:02,937 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-03T05:01:03,162 WARN [Thread-1969 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/cluster_bd6a4643-fcaa-14d8-80d1-ab69445c54a4/data/data3/current/BP-1385824722-172.17.0.3-1733202062277/current, will proceed with Du for space computation calculation, 2024-12-03T05:01:03,170 WARN [Thread-1970 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/cluster_bd6a4643-fcaa-14d8-80d1-ab69445c54a4/data/data4/current/BP-1385824722-172.17.0.3-1733202062277/current, will proceed with Du for space computation calculation, 2024-12-03T05:01:03,263 WARN [Thread-1958 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T05:01:03,269 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcaf14d4e123eadf2 with lease ID 0x35d7ab08d69f18e6: Processing first storage report for DS-d71ddaee-0c36-4352-8ecb-3f81aa3432ba from datanode DatanodeRegistration(127.0.0.1:36851, datanodeUuid=d1e3e6fe-73a3-47ed-8d3b-a6cd6abd4aa4, infoPort=34719, infoSecurePort=0, ipcPort=36437, storageInfo=lv=-57;cid=testClusterID;nsid=1054605898;c=1733202062277) 2024-12-03T05:01:03,269 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcaf14d4e123eadf2 with lease ID 0x35d7ab08d69f18e6: from storage DS-d71ddaee-0c36-4352-8ecb-3f81aa3432ba node DatanodeRegistration(127.0.0.1:36851, datanodeUuid=d1e3e6fe-73a3-47ed-8d3b-a6cd6abd4aa4, infoPort=34719, infoSecurePort=0, ipcPort=36437, storageInfo=lv=-57;cid=testClusterID;nsid=1054605898;c=1733202062277), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T05:01:03,269 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcaf14d4e123eadf2 with lease ID 0x35d7ab08d69f18e6: Processing first storage report for DS-e40b7973-b7b8-4774-b3c8-0903de147bab from datanode DatanodeRegistration(127.0.0.1:36851, datanodeUuid=d1e3e6fe-73a3-47ed-8d3b-a6cd6abd4aa4, infoPort=34719, infoSecurePort=0, ipcPort=36437, storageInfo=lv=-57;cid=testClusterID;nsid=1054605898;c=1733202062277) 2024-12-03T05:01:03,269 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcaf14d4e123eadf2 with lease ID 0x35d7ab08d69f18e6: from storage DS-e40b7973-b7b8-4774-b3c8-0903de147bab node DatanodeRegistration(127.0.0.1:36851, datanodeUuid=d1e3e6fe-73a3-47ed-8d3b-a6cd6abd4aa4, infoPort=34719, infoSecurePort=0, ipcPort=36437, storageInfo=lv=-57;cid=testClusterID;nsid=1054605898;c=1733202062277), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T05:01:03,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:03,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:03,360 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461 2024-12-03T05:01:03,364 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/cluster_bd6a4643-fcaa-14d8-80d1-ab69445c54a4/zookeeper_0, clientPort=52145, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/cluster_bd6a4643-fcaa-14d8-80d1-ab69445c54a4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/cluster_bd6a4643-fcaa-14d8-80d1-ab69445c54a4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T05:01:03,368 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52145 2024-12-03T05:01:03,368 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T05:01:03,370 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T05:01:03,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741825_1001 (size=7) 2024-12-03T05:01:03,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741825_1001 (size=7) 2024-12-03T05:01:03,390 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf with version=8 2024-12-03T05:01:03,391 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/hbase-staging 2024-12-03T05:01:03,393 INFO [Time-limited test {}] client.ConnectionUtils(128): master/32beccd76b35:0 server-side Connection retries=45 2024-12-03T05:01:03,393 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T05:01:03,393 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, 
maxQueueLength=30, handlerCount=3 2024-12-03T05:01:03,393 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T05:01:03,393 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T05:01:03,393 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T05:01:03,393 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-03T05:01:03,393 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T05:01:03,394 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:37267 2024-12-03T05:01:03,396 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37267 connecting to ZooKeeper ensemble=127.0.0.1:52145 2024-12-03T05:01:03,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:372670x0, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T05:01:03,409 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37267-0x1015177a4ae0000 connected 2024-12-03T05:01:03,465 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T05:01:03,467 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T05:01:03,477 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T05:01:03,477 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf, hbase.cluster.distributed=false 2024-12-03T05:01:03,479 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T05:01:03,489 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37267 2024-12-03T05:01:03,489 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37267 2024-12-03T05:01:03,501 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37267 2024-12-03T05:01:03,502 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37267 2024-12-03T05:01:03,503 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): 
Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37267 2024-12-03T05:01:03,527 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/32beccd76b35:0 server-side Connection retries=45 2024-12-03T05:01:03,528 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T05:01:03,528 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T05:01:03,528 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T05:01:03,528 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T05:01:03,528 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T05:01:03,528 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T05:01:03,528 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T05:01:03,529 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:36269 2024-12-03T05:01:03,531 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36269 connecting to ZooKeeper ensemble=127.0.0.1:52145 2024-12-03T05:01:03,532 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T05:01:03,535 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T05:01:03,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:362690x0, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T05:01:03,544 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:362690x0, quorum=127.0.0.1:52145, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T05:01:03,545 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T05:01:03,545 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36269-0x1015177a4ae0001 connected 2024-12-03T05:01:03,548 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T05:01:03,549 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36269-0x1015177a4ae0001, quorum=127.0.0.1:52145, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T05:01:03,550 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36269-0x1015177a4ae0001, quorum=127.0.0.1:52145, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T05:01:03,566 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36269 2024-12-03T05:01:03,577 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36269 2024-12-03T05:01:03,589 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36269 2024-12-03T05:01:03,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36269 2024-12-03T05:01:03,597 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36269 2024-12-03T05:01:03,632 DEBUG [M:0;32beccd76b35:37267 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;32beccd76b35:37267 2024-12-03T05:01:03,632 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/32beccd76b35,37267,1733202063393 2024-12-03T05:01:03,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36269-0x1015177a4ae0001, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T05:01:03,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T05:01:03,637 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/32beccd76b35,37267,1733202063393 2024-12-03T05:01:03,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36269-0x1015177a4ae0001, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T05:01:03,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36269-0x1015177a4ae0001, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:01:03,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:01:03,643 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T05:01:03,643 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/32beccd76b35,37267,1733202063393 from backup master directory 2024-12-03T05:01:03,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36269-0x1015177a4ae0001, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-12-03T05:01:03,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/32beccd76b35,37267,1733202063393 2024-12-03T05:01:03,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T05:01:03,649 WARN [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T05:01:03,649 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=32beccd76b35,37267,1733202063393 2024-12-03T05:01:03,654 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/hbase.id] with ID: 172a467f-90c9-4b20-8749-f53bf4e33436 2024-12-03T05:01:03,654 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/.tmp/hbase.id 2024-12-03T05:01:03,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741826_1002 (size=42) 2024-12-03T05:01:03,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741826_1002 (size=42) 2024-12-03T05:01:04,140 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/.tmp/hbase.id]:[hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/hbase.id] 2024-12-03T05:01:04,159 INFO [master/32beccd76b35:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T05:01:04,159 INFO [master/32beccd76b35:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-03T05:01:04,161 INFO [master/32beccd76b35:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
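The FSUtils entries above record the cluster ID being written to a temporary file and then moved into place as hbase.id. The underlying HDFS pattern (write to a .tmp path, then rename) can be sketched with the plain Hadoop FileSystem API; the paths and the plain-text payload below are illustrative assumptions, not HBase's actual FSUtils code.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Illustrative root dir; the run above used hdfs://localhost:36989/user/jenkins/test-data/...
    Path tmp = new Path("/hbase-rootdir/.tmp/hbase.id");
    Path dst = new Path("/hbase-rootdir/hbase.id");

    // 1. Write the ID to a temporary file first.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("172a467f-90c9-4b20-8749-f53bf4e33436".getBytes(StandardCharsets.UTF_8));
    }
    // 2. Rename into place; HDFS rename is a metadata operation, so readers either
    //    see the complete file or no file at all, never a partial write.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}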
2024-12-03T05:01:04,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36269-0x1015177a4ae0001, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:01:04,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:01:04,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741827_1003 (size=196) 2024-12-03T05:01:04,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741827_1003 (size=196) 2024-12-03T05:01:04,207 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T05:01:04,208 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T05:01:04,209 INFO [master/32beccd76b35:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T05:01:04,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741828_1004 (size=1189) 2024-12-03T05:01:04,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741828_1004 (size=1189) 2024-12-03T05:01:04,243 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store 2024-12-03T05:01:04,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741829_1005 (size=34) 2024-12-03T05:01:04,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741829_1005 (size=34) 2024-12-03T05:01:04,261 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T05:01:04,261 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T05:01:04,261 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T05:01:04,261 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T05:01:04,261 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T05:01:04,261 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T05:01:04,261 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
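The MasterRegion line above prints the full schema of the internal 'master:store' region (info, proc, rs and state families, with per-family versions, encodings, bloom filters and block sizes). A roughly equivalent descriptor can be expressed with the public client API as sketched below; this only mirrors the printed attributes and is not how MasterRegion builds its descriptor internally.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
        // 'info': 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build())
        // 'proc', 'rs', 'state': single version, ROW bloom, default 64 KB blocks.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1).setBloomFilterType(BloomType.ROW).build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("rs"))
            .setMaxVersions(1).setBloomFilterType(BloomType.ROW).build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("state"))
            .setMaxVersions(1).setBloomFilterType(BloomType.ROW).build())
        .build();
  }
}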
2024-12-03T05:01:04,261 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733202064261Disabling compacts and flushes for region at 1733202064261Disabling writes for close at 1733202064261Writing region close event to WAL at 1733202064261Closed at 1733202064261 2024-12-03T05:01:04,262 WARN [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store/.initializing 2024-12-03T05:01:04,262 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/WALs/32beccd76b35,37267,1733202063393 2024-12-03T05:01:04,266 INFO [master/32beccd76b35:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32beccd76b35%2C37267%2C1733202063393, suffix=, logDir=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/WALs/32beccd76b35,37267,1733202063393, archiveDir=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/oldWALs, maxLogs=10 2024-12-03T05:01:04,266 INFO [master/32beccd76b35:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C37267%2C1733202063393.1733202064266 2024-12-03T05:01:04,310 INFO [master/32beccd76b35:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/WALs/32beccd76b35,37267,1733202063393/32beccd76b35%2C37267%2C1733202063393.1733202064266 2024-12-03T05:01:04,331 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42049:42049),(127.0.0.1/127.0.0.1:34719:34719)] 2024-12-03T05:01:04,337 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T05:01:04,337 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T05:01:04,337 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:01:04,337 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:01:04,345 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:01:04,347 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T05:01:04,347 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:01:04,347 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:01:04,347 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:01:04,350 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T05:01:04,350 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:01:04,350 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T05:01:04,350 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:01:04,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-12-03T05:01:04,351 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T05:01:04,351 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:01:04,352 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T05:01:04,352 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:01:04,353 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T05:01:04,353 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:01:04,353 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T05:01:04,353 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:01:04,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-03T05:01:04,356 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-03T05:01:04,356 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-03T05:01:04,357 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682
2024-12-03T05:01:04,357 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682
2024-12-03T05:01:04,358 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
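The two 'Failed invocation' WARNs above come from RecoverLeaseFSUtils probing isFileClosed reflectively against a DFS client that has already been shut down, so the real failure ('Filesystem closed') surfaces as the cause of an InvocationTargetException rather than as a direct IOException. A minimal sketch of that call shape, assuming an isFileClosed(Path) method on the concrete FileSystem, would look like this (it is not the actual RecoverLeaseFSUtils implementation):

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedSketch {
  // Reflectively call isFileClosed(Path) if the FileSystem implementation provides it.
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      // Method not available on this FileSystem; fall back to other recovery checks.
      return false;
    } catch (InvocationTargetException e) {
      // The interesting exception is the wrapped cause, e.g. "Filesystem closed" above.
      System.err.println("isFileClosed failed: " + e.getCause());
      return false;
    }
  }
}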
2024-12-03T05:01:04,361 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:01:04,368 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T05:01:04,369 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=701114, jitterRate=-0.10848836600780487}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T05:01:04,370 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733202064337Initializing all the Stores at 1733202064338 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733202064338Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733202064343 (+5 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733202064343Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733202064343Cleaning up temporary data from old regions at 1733202064357 (+14 ms)Region opened successfully at 1733202064370 (+13 ms) 2024-12-03T05:01:04,377 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T05:01:04,381 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7403a13b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=32beccd76b35/172.17.0.3:0 2024-12-03T05:01:04,383 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
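The flusher/compactor line above reports flushSize=134217728 (128 MB), and the earlier FlushLargeStoresPolicy entry falls back to memstore-flush-size divided by the number of families (32 MB for four families) because no per-family lower bound is set in the table descriptor. The master's local store region injects its own values, so the sketch below only illustrates the general region-level flush knobs with stock configuration keys; it is not the exact source of the numbers printed here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class FlushTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Region memstore flush threshold, 128 MB (matches the flushSize=134217728 above).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Periodic flush interval for idle memstores (default is one hour).
    conf.setLong("hbase.regionserver.optionalcacheflushinterval", 3_600_000L);
    return conf;
  }
}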
2024-12-03T05:01:04,383 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T05:01:04,383 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T05:01:04,383 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T05:01:04,384 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-03T05:01:04,384 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-03T05:01:04,384 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T05:01:04,406 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T05:01:04,410 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T05:01:04,414 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T05:01:04,414 INFO [master/32beccd76b35:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T05:01:04,415 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T05:01:04,418 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T05:01:04,419 INFO [master/32beccd76b35:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T05:01:04,426 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T05:01:04,428 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T05:01:04,429 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T05:01:04,431 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T05:01:04,433 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T05:01:04,434 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T05:01:04,436 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T05:01:04,436 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:01:04,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36269-0x1015177a4ae0001, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T05:01:04,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36269-0x1015177a4ae0001, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:01:04,437 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=32beccd76b35,37267,1733202063393, sessionid=0x1015177a4ae0000, setting cluster-up flag (Was=false) 2024-12-03T05:01:04,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36269-0x1015177a4ae0001, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:01:04,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:01:04,448 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T05:01:04,450 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=32beccd76b35,37267,1733202063393 2024-12-03T05:01:04,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:01:04,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36269-0x1015177a4ae0001, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:01:04,460 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T05:01:04,462 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=32beccd76b35,37267,1733202063393 2024-12-03T05:01:04,463 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at 
hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T05:01:04,468 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T05:01:04,468 INFO [master/32beccd76b35:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T05:01:04,468 INFO [master/32beccd76b35:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-03T05:01:04,468 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 32beccd76b35,37267,1733202063393 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T05:01:04,469 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T05:01:04,469 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T05:01:04,470 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T05:01:04,470 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T05:01:04,470 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/32beccd76b35:0, corePoolSize=10, maxPoolSize=10 2024-12-03T05:01:04,470 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:01:04,470 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/32beccd76b35:0, corePoolSize=2, maxPoolSize=2 2024-12-03T05:01:04,470 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:01:04,479 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T05:01:04,479 INFO [PEWorker-1 {}] 
procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T05:01:04,480 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:01:04,480 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T05:01:04,495 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733202094495 2024-12-03T05:01:04,495 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T05:01:04,495 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T05:01:04,495 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T05:01:04,495 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T05:01:04,495 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T05:01:04,495 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T05:01:04,497 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
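The cleaner entries above show periodic chores (LogsCleaner here, HFileCleaner a few lines later) being registered with the master's ChoreService at fixed periods. ScheduledChore and ChoreService are internal HBase classes rather than public client API, but the scheduling shape they imply can be sketched as below, with a toy chore standing in for the real cleaner logic.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    // Runs every 10 minutes, the same period=600000 reported for LogsCleaner above.
    ScheduledChore demoCleaner = new ScheduledChore("demo-cleaner", stopper, 600_000) {
      @Override protected void chore() {
        // The real cleaners scan oldWALs / archive directories and delete expired files.
        System.out.println("cleaner pass");
      }
    };

    ChoreService choreService = new ChoreService("demo");
    choreService.scheduleChore(demoCleaner);
    // Later, on shutdown: choreService.shutdown();
  }
}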
2024-12-03T05:01:04,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741831_1007 (size=1321) 2024-12-03T05:01:04,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741831_1007 (size=1321) 2024-12-03T05:01:04,506 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T05:01:04,507 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf 2024-12-03T05:01:04,513 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T05:01:04,513 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T05:01:04,513 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T05:01:04,518 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T05:01:04,518 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T05:01:04,518 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.large.0-1733202064518,5,FailOnTimeoutGroup] 2024-12-03T05:01:04,518 DEBUG 
[master/32beccd76b35:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.small.0-1733202064518,5,FailOnTimeoutGroup] 2024-12-03T05:01:04,518 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T05:01:04,518 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T05:01:04,518 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T05:01:04,518 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-03T05:01:04,523 INFO [RS:0;32beccd76b35:36269 {}] regionserver.HRegionServer(746): ClusterId : 172a467f-90c9-4b20-8749-f53bf4e33436 2024-12-03T05:01:04,523 DEBUG [RS:0;32beccd76b35:36269 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T05:01:04,527 DEBUG [RS:0;32beccd76b35:36269 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T05:01:04,527 DEBUG [RS:0;32beccd76b35:36269 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T05:01:04,530 DEBUG [RS:0;32beccd76b35:36269 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T05:01:04,530 DEBUG [RS:0;32beccd76b35:36269 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24cd27c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=32beccd76b35/172.17.0.3:0 2024-12-03T05:01:04,558 DEBUG [RS:0;32beccd76b35:36269 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;32beccd76b35:36269 2024-12-03T05:01:04,558 INFO [RS:0;32beccd76b35:36269 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T05:01:04,558 INFO [RS:0;32beccd76b35:36269 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T05:01:04,558 DEBUG [RS:0;32beccd76b35:36269 {}] regionserver.HRegionServer(832): About to register with Master. 
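Throughout this startup the ZKUtil/ZKWatcher entries report watchers being set on znodes that do not yet exist (/hbase/running, /hbase/master, /hbase/acl) and NodeCreated / NodeChildrenChanged events arriving once the active master creates them. The same behaviour can be reproduced with the plain ZooKeeper client, since exists() registers a watch even when the node is absent; the quorum address below is illustrative (the test above used 127.0.0.1:52145).

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    Watcher watcher = (WatchedEvent event) -> {
      // The ZKWatcher lines above print exactly this tuple: type, state, path.
      System.out.println("Event type=" + event.getType()
          + ", state=" + event.getState() + ", path=" + event.getPath());
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    };

    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, watcher);
    connected.await();

    // exists() sets the watch even if the znode is absent; "Set watcher on znode that
    // does not yet exist, /hbase/running" is this case, and the watcher later fires
    // with NodeCreated when the active master creates the node.
    Stat stat = zk.exists("/hbase/running", watcher);
    System.out.println("/hbase/running " + (stat == null ? "absent, watch set" : "present"));
    // zk.close() when done.
  }
}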
2024-12-03T05:01:04,559 INFO [RS:0;32beccd76b35:36269 {}] regionserver.HRegionServer(2659): reportForDuty to master=32beccd76b35,37267,1733202063393 with port=36269, startcode=1733202063527 2024-12-03T05:01:04,559 DEBUG [RS:0;32beccd76b35:36269 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T05:01:04,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741832_1008 (size=32) 2024-12-03T05:01:04,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741832_1008 (size=32) 2024-12-03T05:01:04,567 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T05:01:04,573 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T05:01:04,575 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T05:01:04,575 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:01:04,578 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:01:04,578 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T05:01:04,580 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName 
ns 2024-12-03T05:01:04,580 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:01:04,580 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:01:04,580 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T05:01:04,582 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T05:01:04,582 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:01:04,582 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:01:04,582 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T05:01:04,583 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T05:01:04,584 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:01:04,584 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:01:04,584 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T05:01:04,585 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740 2024-12-03T05:01:04,585 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740 2024-12-03T05:01:04,587 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T05:01:04,587 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T05:01:04,588 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T05:01:04,590 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T05:01:04,594 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T05:01:04,596 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=771646, jitterRate=-0.018802374601364136}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T05:01:04,597 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733202064568Initializing all the Stores at 1733202064569 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733202064569Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733202064573 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733202064573Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733202064573Cleaning up temporary data from 
old regions at 1733202064587 (+14 ms)Region opened successfully at 1733202064596 (+9 ms) 2024-12-03T05:01:04,597 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T05:01:04,597 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T05:01:04,597 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T05:01:04,597 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T05:01:04,597 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T05:01:04,600 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T05:01:04,600 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733202064597Disabling compacts and flushes for region at 1733202064597Disabling writes for close at 1733202064597Writing region close event to WAL at 1733202064599 (+2 ms)Closed at 1733202064599 2024-12-03T05:01:04,602 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60647, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T05:01:04,602 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T05:01:04,602 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T05:01:04,602 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37267 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 32beccd76b35,36269,1733202063527 2024-12-03T05:01:04,602 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T05:01:04,603 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37267 {}] master.ServerManager(517): Registering regionserver=32beccd76b35,36269,1733202063527 2024-12-03T05:01:04,606 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T05:01:04,607 DEBUG [RS:0;32beccd76b35:36269 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf 2024-12-03T05:01:04,607 DEBUG [RS:0;32beccd76b35:36269 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36989 2024-12-03T05:01:04,607 DEBUG [RS:0;32beccd76b35:36269 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T05:01:04,609 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T05:01:04,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T05:01:04,613 DEBUG [RS:0;32beccd76b35:36269 {}] zookeeper.ZKUtil(111): regionserver:36269-0x1015177a4ae0001, quorum=127.0.0.1:52145, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/32beccd76b35,36269,1733202063527 2024-12-03T05:01:04,613 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [32beccd76b35,36269,1733202063527] 2024-12-03T05:01:04,613 WARN [RS:0;32beccd76b35:36269 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T05:01:04,613 INFO [RS:0;32beccd76b35:36269 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T05:01:04,613 DEBUG [RS:0;32beccd76b35:36269 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/WALs/32beccd76b35,36269,1733202063527 2024-12-03T05:01:04,624 INFO [RS:0;32beccd76b35:36269 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T05:01:04,629 INFO [RS:0;32beccd76b35:36269 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T05:01:04,630 INFO [RS:0;32beccd76b35:36269 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T05:01:04,630 INFO [RS:0;32beccd76b35:36269 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T05:01:04,632 INFO [RS:0;32beccd76b35:36269 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T05:01:04,633 INFO [RS:0;32beccd76b35:36269 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T05:01:04,633 INFO [RS:0;32beccd76b35:36269 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-03T05:01:04,633 DEBUG [RS:0;32beccd76b35:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:01:04,633 DEBUG [RS:0;32beccd76b35:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:01:04,633 DEBUG [RS:0;32beccd76b35:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:01:04,633 DEBUG [RS:0;32beccd76b35:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:01:04,634 DEBUG [RS:0;32beccd76b35:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:01:04,634 DEBUG [RS:0;32beccd76b35:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/32beccd76b35:0, corePoolSize=2, maxPoolSize=2 2024-12-03T05:01:04,634 DEBUG [RS:0;32beccd76b35:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:01:04,634 DEBUG [RS:0;32beccd76b35:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:01:04,634 DEBUG [RS:0;32beccd76b35:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:01:04,634 DEBUG [RS:0;32beccd76b35:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:01:04,634 DEBUG [RS:0;32beccd76b35:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:01:04,634 DEBUG [RS:0;32beccd76b35:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:01:04,634 DEBUG [RS:0;32beccd76b35:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/32beccd76b35:0, corePoolSize=3, maxPoolSize=3 2024-12-03T05:01:04,634 DEBUG [RS:0;32beccd76b35:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0, corePoolSize=3, maxPoolSize=3 2024-12-03T05:01:04,641 INFO [RS:0;32beccd76b35:36269 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T05:01:04,641 INFO [RS:0;32beccd76b35:36269 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T05:01:04,641 INFO [RS:0;32beccd76b35:36269 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T05:01:04,641 INFO [RS:0;32beccd76b35:36269 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-03T05:01:04,641 INFO [RS:0;32beccd76b35:36269 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T05:01:04,641 INFO [RS:0;32beccd76b35:36269 {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,36269,1733202063527-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T05:01:04,662 INFO [RS:0;32beccd76b35:36269 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T05:01:04,662 INFO [RS:0;32beccd76b35:36269 {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,36269,1733202063527-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T05:01:04,662 INFO [RS:0;32beccd76b35:36269 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T05:01:04,662 INFO [RS:0;32beccd76b35:36269 {}] regionserver.Replication(171): 32beccd76b35,36269,1733202063527 started 2024-12-03T05:01:04,679 INFO [RS:0;32beccd76b35:36269 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T05:01:04,679 INFO [RS:0;32beccd76b35:36269 {}] regionserver.HRegionServer(1482): Serving as 32beccd76b35,36269,1733202063527, RpcServer on 32beccd76b35/172.17.0.3:36269, sessionid=0x1015177a4ae0001 2024-12-03T05:01:04,679 DEBUG [RS:0;32beccd76b35:36269 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T05:01:04,679 DEBUG [RS:0;32beccd76b35:36269 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 32beccd76b35,36269,1733202063527 2024-12-03T05:01:04,679 DEBUG [RS:0;32beccd76b35:36269 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '32beccd76b35,36269,1733202063527' 2024-12-03T05:01:04,679 DEBUG [RS:0;32beccd76b35:36269 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T05:01:04,679 DEBUG [RS:0;32beccd76b35:36269 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T05:01:04,680 DEBUG [RS:0;32beccd76b35:36269 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T05:01:04,680 DEBUG [RS:0;32beccd76b35:36269 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T05:01:04,680 DEBUG [RS:0;32beccd76b35:36269 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 32beccd76b35,36269,1733202063527 2024-12-03T05:01:04,680 DEBUG [RS:0;32beccd76b35:36269 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '32beccd76b35,36269,1733202063527' 2024-12-03T05:01:04,680 DEBUG [RS:0;32beccd76b35:36269 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T05:01:04,680 DEBUG [RS:0;32beccd76b35:36269 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T05:01:04,681 DEBUG [RS:0;32beccd76b35:36269 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T05:01:04,681 INFO [RS:0;32beccd76b35:36269 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T05:01:04,681 INFO [RS:0;32beccd76b35:36269 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-03T05:01:04,759 WARN [32beccd76b35:37267 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-03T05:01:04,783 INFO [RS:0;32beccd76b35:36269 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32beccd76b35%2C36269%2C1733202063527, suffix=, logDir=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/WALs/32beccd76b35,36269,1733202063527, archiveDir=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/oldWALs, maxLogs=32 2024-12-03T05:01:04,783 INFO [RS:0;32beccd76b35:36269 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C36269%2C1733202063527.1733202064783 2024-12-03T05:01:04,804 INFO [RS:0;32beccd76b35:36269 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/WALs/32beccd76b35,36269,1733202063527/32beccd76b35%2C36269%2C1733202063527.1733202064783 2024-12-03T05:01:04,805 DEBUG [RS:0;32beccd76b35:36269 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34719:34719),(127.0.0.1/127.0.0.1:42049:42049)] 2024-12-03T05:01:05,010 DEBUG [32beccd76b35:37267 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-03T05:01:05,010 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=32beccd76b35,36269,1733202063527 2024-12-03T05:01:05,012 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 32beccd76b35,36269,1733202063527, state=OPENING 2024-12-03T05:01:05,014 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T05:01:05,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36269-0x1015177a4ae0001, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:01:05,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:01:05,017 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T05:01:05,017 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T05:01:05,017 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T05:01:05,017 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=32beccd76b35,36269,1733202063527}] 2024-12-03T05:01:05,173 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T05:01:05,176 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56339, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T05:01:05,189 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T05:01:05,190 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T05:01:05,194 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32beccd76b35%2C36269%2C1733202063527.meta, suffix=.meta, logDir=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/WALs/32beccd76b35,36269,1733202063527, archiveDir=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/oldWALs, maxLogs=32 2024-12-03T05:01:05,194 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C36269%2C1733202063527.meta.1733202065194.meta 2024-12-03T05:01:05,274 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/WALs/32beccd76b35,36269,1733202063527/32beccd76b35%2C36269%2C1733202063527.meta.1733202065194.meta 2024-12-03T05:01:05,335 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42049:42049),(127.0.0.1/127.0.0.1:34719:34719)] 2024-12-03T05:01:05,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:05,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:01:05,357 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T05:01:05,357 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T05:01:05,357 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T05:01:05,357 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-03T05:01:05,357 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T05:01:05,358 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T05:01:05,358 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T05:01:05,358 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T05:01:05,365 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T05:01:05,370 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T05:01:05,370 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:01:05,370 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:01:05,371 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, 
cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T05:01:05,372 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T05:01:05,372 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:01:05,372 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:01:05,372 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T05:01:05,374 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T05:01:05,374 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:01:05,375 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:01:05,375 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T05:01:05,376 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); 
ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T05:01:05,376 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:01:05,377 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:01:05,377 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T05:01:05,378 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740 2024-12-03T05:01:05,379 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740 2024-12-03T05:01:05,391 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T05:01:05,391 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T05:01:05,396 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-03T05:01:05,400 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T05:01:05,401 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=837218, jitterRate=0.06457863748073578}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T05:01:05,401 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T05:01:05,402 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733202065358Writing region info on filesystem at 1733202065358Initializing all the Stores at 1733202065360 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733202065360Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733202065365 (+5 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733202065365Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733202065365Cleaning up temporary data from old regions at 1733202065391 (+26 ms)Running coprocessor post-open hooks at 1733202065401 (+10 ms)Region opened successfully at 1733202065402 (+1 ms) 2024-12-03T05:01:05,409 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733202065173 2024-12-03T05:01:05,417 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T05:01:05,417 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T05:01:05,420 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=32beccd76b35,36269,1733202063527 2024-12-03T05:01:05,422 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 32beccd76b35,36269,1733202063527, state=OPEN 2024-12-03T05:01:05,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36269-0x1015177a4ae0001, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T05:01:05,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T05:01:05,430 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T05:01:05,430 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T05:01:05,431 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=32beccd76b35,36269,1733202063527 2024-12-03T05:01:05,434 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T05:01:05,434 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=32beccd76b35,36269,1733202063527 in 414 msec 2024-12-03T05:01:05,437 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T05:01:05,437 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 832 msec 2024-12-03T05:01:05,438 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T05:01:05,439 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T05:01:05,440 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T05:01:05,440 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=32beccd76b35,36269,1733202063527, seqNum=-1] 2024-12-03T05:01:05,441 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T05:01:05,442 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38157, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T05:01:05,455 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 988 msec 2024-12-03T05:01:05,455 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733202065455, completionTime=-1 2024-12-03T05:01:05,455 INFO 
[master/32beccd76b35:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-03T05:01:05,455 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-03T05:01:05,458 INFO [master/32beccd76b35:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-03T05:01:05,458 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733202125458 2024-12-03T05:01:05,458 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733202185458 2024-12-03T05:01:05,458 INFO [master/32beccd76b35:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-03T05:01:05,459 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,37267,1733202063393-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T05:01:05,459 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,37267,1733202063393-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T05:01:05,459 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,37267,1733202063393-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T05:01:05,459 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-32beccd76b35:37267, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T05:01:05,459 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T05:01:05,459 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-03T05:01:05,462 DEBUG [master/32beccd76b35:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T05:01:05,464 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.815sec 2024-12-03T05:01:05,464 INFO [master/32beccd76b35:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T05:01:05,464 INFO [master/32beccd76b35:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T05:01:05,464 INFO [master/32beccd76b35:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T05:01:05,464 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-03T05:01:05,464 INFO [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T05:01:05,464 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,37267,1733202063393-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T05:01:05,464 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,37267,1733202063393-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T05:01:05,479 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T05:01:05,479 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T05:01:05,479 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,37267,1733202063393-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T05:01:05,530 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4db46c0f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T05:01:05,530 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 32beccd76b35,37267,-1 for getting cluster id 2024-12-03T05:01:05,531 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T05:01:05,540 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '172a467f-90c9-4b20-8749-f53bf4e33436' 2024-12-03T05:01:05,540 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T05:01:05,540 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "172a467f-90c9-4b20-8749-f53bf4e33436" 2024-12-03T05:01:05,540 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bcfbd03, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T05:01:05,540 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [32beccd76b35,37267,-1] 2024-12-03T05:01:05,541 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T05:01:05,541 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T05:01:05,542 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37038, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T05:01:05,544 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c7ed85b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T05:01:05,544 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T05:01:05,545 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=32beccd76b35,36269,1733202063527, seqNum=-1] 2024-12-03T05:01:05,546 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T05:01:05,547 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41112, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T05:01:05,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=32beccd76b35,37267,1733202063393 2024-12-03T05:01:05,550 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T05:01:05,563 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-03T05:01:05,564 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-03T05:01:05,565 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 32beccd76b35,37267,1733202063393 2024-12-03T05:01:05,565 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@34648aa 2024-12-03T05:01:05,565 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T05:01:05,566 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37044, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T05:01:05,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37267 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-03T05:01:05,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37267 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-03T05:01:05,568 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37267 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T05:01:05,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37267 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-12-03T05:01:05,582 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T05:01:05,582 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:01:05,583 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37267 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-12-03T05:01:05,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37267 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T05:01:05,584 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T05:01:05,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741835_1011 (size=381) 2024-12-03T05:01:05,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741835_1011 (size=381) 2024-12-03T05:01:05,625 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c411983ff3bea3ce63a0c6f49ca7b025, NAME => 'TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf 2024-12-03T05:01:05,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741836_1012 (size=64) 2024-12-03T05:01:05,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741836_1012 (size=64) 2024-12-03T05:01:05,706 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T05:01:05,706 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing c411983ff3bea3ce63a0c6f49ca7b025, disabling compactions & flushes 2024-12-03T05:01:05,706 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025. 2024-12-03T05:01:05,706 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025. 2024-12-03T05:01:05,706 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025. after waiting 0 ms 2024-12-03T05:01:05,706 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025. 2024-12-03T05:01:05,706 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025. 2024-12-03T05:01:05,706 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for c411983ff3bea3ce63a0c6f49ca7b025: Waiting for close lock at 1733202065706Disabling compacts and flushes for region at 1733202065706Disabling writes for close at 1733202065706Writing region close event to WAL at 1733202065706Closed at 1733202065706 2024-12-03T05:01:05,708 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T05:01:05,708 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733202065708"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733202065708"}]},"ts":"1733202065708"} 2024-12-03T05:01:05,711 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-03T05:01:05,712 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T05:01:05,713 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733202065712"}]},"ts":"1733202065712"} 2024-12-03T05:01:05,715 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-12-03T05:01:05,715 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c411983ff3bea3ce63a0c6f49ca7b025, ASSIGN}] 2024-12-03T05:01:05,718 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c411983ff3bea3ce63a0c6f49ca7b025, ASSIGN 2024-12-03T05:01:05,729 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c411983ff3bea3ce63a0c6f49ca7b025, ASSIGN; state=OFFLINE, location=32beccd76b35,36269,1733202063527; forceNewPlan=false, retain=false 2024-12-03T05:01:05,730 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:05,731 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:05,731 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:05,731 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:05,731 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:05,731 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:05,732 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:05,732 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:05,764 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:05,764 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:05,765 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:05,765 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:05,765 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:05,765 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:05,778 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:05,778 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:05,779 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:05,782 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:05,881 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c411983ff3bea3ce63a0c6f49ca7b025, regionState=OPENING, regionLocation=32beccd76b35,36269,1733202063527 2024-12-03T05:01:05,884 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c411983ff3bea3ce63a0c6f49ca7b025, ASSIGN because future has completed 2024-12-03T05:01:05,885 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c411983ff3bea3ce63a0c6f49ca7b025, server=32beccd76b35,36269,1733202063527}] 2024-12-03T05:01:06,047 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025. 
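At this point the master has queued OpenRegionProcedure pid=6 for region server 32beccd76b35,36269,1733202063527 and the region is about to open there. From the client's perspective all of this is hidden behind the blocking createTable call; if a test wants an additional guarantee that every region is actually online before it starts writing, it can poll the Admin API. A small sketch under that assumption (only the table name comes from the log):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    final class WaitForTableOnline {
      // Poll until the master reports the table available, i.e. all of its regions are assigned and open.
      static void await(Admin admin, String name) throws Exception {
        TableName table = TableName.valueOf(name);
        while (!admin.isTableAvailable(table)) {
          Thread.sleep(100L); // polling interval is an arbitrary choice for this sketch
        }
      }
    }

    // Usage (sketch): await(admin, "TestLogRolling-testLogRolling");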
2024-12-03T05:01:06,047 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c411983ff3bea3ce63a0c6f49ca7b025, NAME => 'TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025.', STARTKEY => '', ENDKEY => ''}
2024-12-03T05:01:06,047 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling c411983ff3bea3ce63a0c6f49ca7b025
2024-12-03T05:01:06,047 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-03T05:01:06,047 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c411983ff3bea3ce63a0c6f49ca7b025
2024-12-03T05:01:06,047 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c411983ff3bea3ce63a0c6f49ca7b025
2024-12-03T05:01:06,051 INFO [StoreOpener-c411983ff3bea3ce63a0c6f49ca7b025-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c411983ff3bea3ce63a0c6f49ca7b025
2024-12-03T05:01:06,053 INFO [StoreOpener-c411983ff3bea3ce63a0c6f49ca7b025-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c411983ff3bea3ce63a0c6f49ca7b025 columnFamilyName info
2024-12-03T05:01:06,053 DEBUG [StoreOpener-c411983ff3bea3ce63a0c6f49ca7b025-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T05:01:06,053 INFO [StoreOpener-c411983ff3bea3ce63a0c6f49ca7b025-1 {}] regionserver.HStore(327): Store=c411983ff3bea3ce63a0c6f49ca7b025/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-03T05:01:06,054 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c411983ff3bea3ce63a0c6f49ca7b025
2024-12-03T05:01:06,056 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025
2024-12-03T05:01:06,056 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025
2024-12-03T05:01:06,057 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c411983ff3bea3ce63a0c6f49ca7b025
2024-12-03T05:01:06,057 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c411983ff3bea3ce63a0c6f49ca7b025
2024-12-03T05:01:06,059 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c411983ff3bea3ce63a0c6f49ca7b025
2024-12-03T05:01:06,062 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-03T05:01:06,063 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened c411983ff3bea3ce63a0c6f49ca7b025; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=808139, jitterRate=0.027602121233940125}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-03T05:01:06,063 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c411983ff3bea3ce63a0c6f49ca7b025
2024-12-03T05:01:06,064 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c411983ff3bea3ce63a0c6f49ca7b025: Running coprocessor pre-open hook at 1733202066048Writing region info on filesystem at 1733202066048Initializing all the Stores at 1733202066051 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733202066051Cleaning up temporary data from old regions at 1733202066057 (+6 ms)Running coprocessor post-open hooks at 1733202066063 (+6 ms)Region opened successfully at 1733202066064 (+1 ms)
2024-12-03T05:01:06,065 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025., pid=6, masterSystemTime=1733202066038
2024-12-03T05:01:06,069 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025.
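Region c411983ff3bea3ce63a0c6f49ca7b025 is now open with next sequenceid=2 under a SteppingSplitPolicy whose effective desiredMaxFileSize is only about 800 KB, which is presumably what lets this test trigger region splits and log rolls quickly. Separately, the Close-WAL-Writer-0 warnings that fill most of the remaining log refer to WAL files from an earlier test directory (namenode port 38443, servers 38103 and 44203): the WAL close path keeps retrying HDFS lease recovery on those old files, but the DFSClient behind that FileSystem handle has already been closed, so every attempt fails with "Filesystem closed". A simplified sketch of the retry loop being attempted there; this is not the actual RecoverLeaseFSUtils code, which invokes isFileClosed through reflection, as the stack traces show:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    final class WalLeaseRecoverySketch {
      // Keep asking HDFS whether the old WAL writer's lease has been recovered and the file closed.
      // In the traces below the DFSClient is already shut down, so the loop can never succeed.
      static void recoverWalLease(DistributedFileSystem dfs, Path wal) throws Exception {
        boolean recovered = dfs.recoverLease(wal); // ask the NameNode to take over the dead writer's lease
        while (!recovered) {
          Thread.sleep(1000L);                     // the log shows roughly one attempt per second
          recovered = dfs.isFileClosed(wal);       // the call that keeps failing with "Filesystem closed"
        }
      }
    }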
2024-12-03T05:01:06,069 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025. 2024-12-03T05:01:06,076 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c411983ff3bea3ce63a0c6f49ca7b025, regionState=OPEN, openSeqNum=2, regionLocation=32beccd76b35,36269,1733202063527 2024-12-03T05:01:06,079 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c411983ff3bea3ce63a0c6f49ca7b025, server=32beccd76b35,36269,1733202063527 because future has completed 2024-12-03T05:01:06,087 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-03T05:01:06,087 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c411983ff3bea3ce63a0c6f49ca7b025, server=32beccd76b35,36269,1733202063527 in 197 msec 2024-12-03T05:01:06,090 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-03T05:01:06,090 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c411983ff3bea3ce63a0c6f49ca7b025, ASSIGN in 372 msec 2024-12-03T05:01:06,091 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T05:01:06,091 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733202066091"}]},"ts":"1733202066091"} 2024-12-03T05:01:06,094 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-03T05:01:06,095 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T05:01:06,097 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 527 msec 2024-12-03T05:01:06,289 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T05:01:06,290 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:06,290 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:06,291 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:06,291 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:06,291 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:06,291 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:06,292 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:06,293 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:06,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:06,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:06,325 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:06,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:06,326 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:06,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:06,334 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:06,334 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:06,335 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:06,338 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:06,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:06,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:07,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:07,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:08,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:08,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:09,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:09,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:10,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:10,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:10,624 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-03T05:01:10,625 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-03T05:01:11,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:11,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:11,550 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T05:01:11,559 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:11,559 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:11,560 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:11,560 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:11,560 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:11,561 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:11,562 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:11,562 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:11,615 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:11,635 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:11,635 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:11,636 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:11,636 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:11,636 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:11,658 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:11,659 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:11,659 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:11,663 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:12,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:12,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:12,833 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-03T05:01:12,833 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-03T05:01:12,834 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T05:01:12,834 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-03T05:01:12,834 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-03T05:01:12,834 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-03T05:01:12,835 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-03T05:01:12,835 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer
2024-12-03T05:01:15,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37267 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-03T05:01:15,650 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-12-03T05:01:15,650 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-12-03T05:01:15,654 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-12-03T05:01:15,655 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025. 2024-12-03T05:01:15,658 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025., hostname=32beccd76b35,36269,1733202063527, seqNum=2] 2024-12-03T05:01:15,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36269 {}] regionserver.HRegion(8855): Flush requested on c411983ff3bea3ce63a0c6f49ca7b025 2024-12-03T05:01:15,695 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c411983ff3bea3ce63a0c6f49ca7b025 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T05:01:15,733 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/.tmp/info/509b6de8bc374dffb2647c5178f8c1ad is 1080, key is row0001/info:/1733202075660/Put/seqid=0 2024-12-03T05:01:15,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c411983ff3bea3ce63a0c6f49ca7b025, server=32beccd76b35,36269,1733202063527 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-03T05:01:15,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:41112 deadline: 1733202085744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c411983ff3bea3ce63a0c6f49ca7b025, server=32beccd76b35,36269,1733202063527 2024-12-03T05:01:15,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741837_1013 (size=12509) 2024-12-03T05:01:15,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741837_1013 (size=12509) 2024-12-03T05:01:15,778 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/.tmp/info/509b6de8bc374dffb2647c5178f8c1ad 2024-12-03T05:01:15,779 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025., hostname=32beccd76b35,36269,1733202063527, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025., hostname=32beccd76b35,36269,1733202063527, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c411983ff3bea3ce63a0c6f49ca7b025, server=32beccd76b35,36269,1733202063527 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T05:01:15,780 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025., hostname=32beccd76b35,36269,1733202063527, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c411983ff3bea3ce63a0c6f49ca7b025, server=32beccd76b35,36269,1733202063527 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T05:01:15,780 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025., hostname=32beccd76b35,36269,1733202063527, seqNum=2 because the exception is null or not the one we care about 2024-12-03T05:01:15,796 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/.tmp/info/509b6de8bc374dffb2647c5178f8c1ad as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/509b6de8bc374dffb2647c5178f8c1ad 2024-12-03T05:01:15,807 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/509b6de8bc374dffb2647c5178f8c1ad, entries=7, sequenceid=11, filesize=12.2 K 2024-12-03T05:01:15,809 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for c411983ff3bea3ce63a0c6f49ca7b025 in 114ms, sequenceid=11, compaction requested=false 2024-12-03T05:01:15,809 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c411983ff3bea3ce63a0c6f49ca7b025:
2024-12-03T05:01:25,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:25,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:25,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] regionserver.HRegion(8855): Flush requested on c411983ff3bea3ce63a0c6f49ca7b025 2024-12-03T05:01:25,851 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c411983ff3bea3ce63a0c6f49ca7b025 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-03T05:01:25,856 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/.tmp/info/5a3cf9c5efa145ad8d31a88329b03cbe is 1080, key is row0008/info:/1733202075697/Put/seqid=0 2024-12-03T05:01:25,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741838_1014 (size=29761) 2024-12-03T05:01:25,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741838_1014 (size=29761) 2024-12-03T05:01:25,863 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/.tmp/info/5a3cf9c5efa145ad8d31a88329b03cbe 2024-12-03T05:01:25,869 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/.tmp/info/5a3cf9c5efa145ad8d31a88329b03cbe as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/5a3cf9c5efa145ad8d31a88329b03cbe 2024-12-03T05:01:25,873 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/5a3cf9c5efa145ad8d31a88329b03cbe, entries=23, sequenceid=37, filesize=29.1 K 2024-12-03T05:01:25,874 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for c411983ff3bea3ce63a0c6f49ca7b025 in 23ms, sequenceid=37, compaction requested=false 2024-12-03T05:01:25,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c411983ff3bea3ce63a0c6f49ca7b025: 2024-12-03T05:01:25,875 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-12-03T05:01:25,875 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T05:01:25,875 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/5a3cf9c5efa145ad8d31a88329b03cbe because midkey is the same as first or last row 2024-12-03T05:01:26,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:26,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:27,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:27,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:01:27,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] regionserver.HRegion(8855): Flush requested on c411983ff3bea3ce63a0c6f49ca7b025 2024-12-03T05:01:27,864 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c411983ff3bea3ce63a0c6f49ca7b025 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T05:01:27,868 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/.tmp/info/01838329869549659381ca2bee30172f is 1080, key is row0031/info:/1733202085852/Put/seqid=0 2024-12-03T05:01:27,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741839_1015 (size=12509) 2024-12-03T05:01:27,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741839_1015 (size=12509) 2024-12-03T05:01:27,876 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/.tmp/info/01838329869549659381ca2bee30172f 2024-12-03T05:01:27,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/.tmp/info/01838329869549659381ca2bee30172f as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/01838329869549659381ca2bee30172f 2024-12-03T05:01:27,887 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/01838329869549659381ca2bee30172f, entries=7, sequenceid=47, filesize=12.2 K 2024-12-03T05:01:27,887 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9684 for c411983ff3bea3ce63a0c6f49ca7b025 in 23ms, sequenceid=47, compaction requested=true 2024-12-03T05:01:27,887 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c411983ff3bea3ce63a0c6f49ca7b025: 2024-12-03T05:01:27,888 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-12-03T05:01:27,888 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T05:01:27,888 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/5a3cf9c5efa145ad8d31a88329b03cbe because midkey is the same as first or last row 2024-12-03T05:01:27,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c411983ff3bea3ce63a0c6f49ca7b025:info, priority=-2147483648, current under compaction store 
size is 1 2024-12-03T05:01:27,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T05:01:27,888 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T05:01:27,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] regionserver.HRegion(8855): Flush requested on c411983ff3bea3ce63a0c6f49ca7b025 2024-12-03T05:01:27,889 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c411983ff3bea3ce63a0c6f49ca7b025 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-12-03T05:01:27,889 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T05:01:27,889 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1541): c411983ff3bea3ce63a0c6f49ca7b025/info is initiating minor compaction (all files) 2024-12-03T05:01:27,889 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c411983ff3bea3ce63a0c6f49ca7b025/info in TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025. 2024-12-03T05:01:27,889 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/509b6de8bc374dffb2647c5178f8c1ad, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/5a3cf9c5efa145ad8d31a88329b03cbe, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/01838329869549659381ca2bee30172f] into tmpdir=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/.tmp, totalSize=53.5 K 2024-12-03T05:01:27,890 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting 509b6de8bc374dffb2647c5178f8c1ad, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733202075660 2024-12-03T05:01:27,890 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5a3cf9c5efa145ad8d31a88329b03cbe, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733202075697 2024-12-03T05:01:27,891 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting 01838329869549659381ca2bee30172f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733202085852 2024-12-03T05:01:27,893 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/.tmp/info/c0a5b491e61b4df6950fad0b88553e44 is 1080, key is row0038/info:/1733202087865/Put/seqid=0 
2024-12-03T05:01:27,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741840_1016 (size=15740) 2024-12-03T05:01:27,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741840_1016 (size=15740) 2024-12-03T05:01:27,907 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/.tmp/info/c0a5b491e61b4df6950fad0b88553e44 2024-12-03T05:01:27,922 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/.tmp/info/c0a5b491e61b4df6950fad0b88553e44 as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/c0a5b491e61b4df6950fad0b88553e44 2024-12-03T05:01:27,924 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c411983ff3bea3ce63a0c6f49ca7b025#info#compaction#58 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T05:01:27,925 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/.tmp/info/79ef955e1fb44b8baf11fe85820a808a is 1080, key is row0001/info:/1733202075660/Put/seqid=0 2024-12-03T05:01:27,928 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/c0a5b491e61b4df6950fad0b88553e44, entries=10, sequenceid=60, filesize=15.4 K 2024-12-03T05:01:27,929 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=17.86 KB/18292 for c411983ff3bea3ce63a0c6f49ca7b025 in 39ms, sequenceid=60, compaction requested=false 2024-12-03T05:01:27,929 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c411983ff3bea3ce63a0c6f49ca7b025: 2024-12-03T05:01:27,929 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=68.9 K, sizeToCheck=16.0 K 2024-12-03T05:01:27,929 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T05:01:27,929 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/5a3cf9c5efa145ad8d31a88329b03cbe because midkey is the same as first or last row 2024-12-03T05:01:27,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741841_1017 (size=44978) 2024-12-03T05:01:27,937 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741841_1017 (size=44978) 2024-12-03T05:01:28,344 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/.tmp/info/79ef955e1fb44b8baf11fe85820a808a as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/79ef955e1fb44b8baf11fe85820a808a 2024-12-03T05:01:28,350 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c411983ff3bea3ce63a0c6f49ca7b025/info of c411983ff3bea3ce63a0c6f49ca7b025 into 79ef955e1fb44b8baf11fe85820a808a(size=43.9 K), total size for store is 59.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T05:01:28,350 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c411983ff3bea3ce63a0c6f49ca7b025: 2024-12-03T05:01:28,350 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025., storeName=c411983ff3bea3ce63a0c6f49ca7b025/info, priority=13, startTime=1733202087888; duration=0sec 2024-12-03T05:01:28,350 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=59.3 K, sizeToCheck=16.0 K 2024-12-03T05:01:28,351 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T05:01:28,351 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/79ef955e1fb44b8baf11fe85820a808a because midkey is the same as first or last row 2024-12-03T05:01:28,351 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=59.3 K, sizeToCheck=16.0 K 2024-12-03T05:01:28,351 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T05:01:28,351 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/79ef955e1fb44b8baf11fe85820a808a because midkey is the same as first or last row 2024-12-03T05:01:28,351 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=59.3 K, sizeToCheck=16.0 K 2024-12-03T05:01:28,351 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T05:01:28,351 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/79ef955e1fb44b8baf11fe85820a808a because midkey is the same as first or last row 2024-12-03T05:01:28,351 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T05:01:28,351 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c411983ff3bea3ce63a0c6f49ca7b025:info 2024-12-03T05:01:28,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:28,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:29,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:29,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:01:29,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] regionserver.HRegion(8855): Flush requested on c411983ff3bea3ce63a0c6f49ca7b025 2024-12-03T05:01:29,919 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c411983ff3bea3ce63a0c6f49ca7b025 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-12-03T05:01:29,925 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/.tmp/info/aecebc16a5a74d3ab5f2f01b440c42e4 is 1080, key is row0048/info:/1733202087890/Put/seqid=0 2024-12-03T05:01:29,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741842_1018 (size=24376) 2024-12-03T05:01:29,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741842_1018 (size=24376) 2024-12-03T05:01:29,934 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/.tmp/info/aecebc16a5a74d3ab5f2f01b440c42e4 2024-12-03T05:01:29,942 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/.tmp/info/aecebc16a5a74d3ab5f2f01b440c42e4 as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/aecebc16a5a74d3ab5f2f01b440c42e4 2024-12-03T05:01:29,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c411983ff3bea3ce63a0c6f49ca7b025, server=32beccd76b35,36269,1733202063527 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-03T05:01:29,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:41112 deadline: 1733202099941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c411983ff3bea3ce63a0c6f49ca7b025, server=32beccd76b35,36269,1733202063527 2024-12-03T05:01:29,948 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025., hostname=32beccd76b35,36269,1733202063527, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025., hostname=32beccd76b35,36269,1733202063527, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c411983ff3bea3ce63a0c6f49ca7b025, server=32beccd76b35,36269,1733202063527 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T05:01:29,948 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025., hostname=32beccd76b35,36269,1733202063527, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=c411983ff3bea3ce63a0c6f49ca7b025, server=32beccd76b35,36269,1733202063527 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T05:01:29,948 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025., hostname=32beccd76b35,36269,1733202063527, seqNum=2 because the exception is null or not the one we care about 2024-12-03T05:01:29,949 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/aecebc16a5a74d3ab5f2f01b440c42e4, entries=18, sequenceid=82, filesize=23.8 K 2024-12-03T05:01:29,950 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=11.56 KB/11836 for c411983ff3bea3ce63a0c6f49ca7b025 in 31ms, sequenceid=82, compaction requested=true 2024-12-03T05:01:29,950 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c411983ff3bea3ce63a0c6f49ca7b025: 2024-12-03T05:01:29,950 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-12-03T05:01:29,950 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T05:01:29,950 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/79ef955e1fb44b8baf11fe85820a808a because midkey is the same as first or last row 2024-12-03T05:01:29,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c411983ff3bea3ce63a0c6f49ca7b025:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T05:01:29,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T05:01:29,950 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T05:01:29,951 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85094 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T05:01:29,951 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1541): c411983ff3bea3ce63a0c6f49ca7b025/info is initiating minor compaction (all files) 2024-12-03T05:01:29,951 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c411983ff3bea3ce63a0c6f49ca7b025/info in TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025. 
2024-12-03T05:01:29,952 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/79ef955e1fb44b8baf11fe85820a808a, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/c0a5b491e61b4df6950fad0b88553e44, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/aecebc16a5a74d3ab5f2f01b440c42e4] into tmpdir=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/.tmp, totalSize=83.1 K 2024-12-03T05:01:29,952 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting 79ef955e1fb44b8baf11fe85820a808a, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733202075660 2024-12-03T05:01:29,952 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting c0a5b491e61b4df6950fad0b88553e44, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1733202087865 2024-12-03T05:01:29,953 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting aecebc16a5a74d3ab5f2f01b440c42e4, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733202087890 2024-12-03T05:01:29,966 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c411983ff3bea3ce63a0c6f49ca7b025#info#compaction#60 average throughput is 22.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T05:01:29,966 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/.tmp/info/1c5b65c0b85d45fda44c90652b16e4ee is 1080, key is row0001/info:/1733202075660/Put/seqid=0 2024-12-03T05:01:29,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741843_1019 (size=75378) 2024-12-03T05:01:29,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741843_1019 (size=75378) 2024-12-03T05:01:29,975 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/.tmp/info/1c5b65c0b85d45fda44c90652b16e4ee as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/1c5b65c0b85d45fda44c90652b16e4ee 2024-12-03T05:01:29,981 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c411983ff3bea3ce63a0c6f49ca7b025/info of c411983ff3bea3ce63a0c6f49ca7b025 into 1c5b65c0b85d45fda44c90652b16e4ee(size=73.6 K), total size for store is 73.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T05:01:29,981 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c411983ff3bea3ce63a0c6f49ca7b025: 2024-12-03T05:01:29,981 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025., storeName=c411983ff3bea3ce63a0c6f49ca7b025/info, priority=13, startTime=1733202089950; duration=0sec 2024-12-03T05:01:29,981 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-12-03T05:01:29,981 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T05:01:29,981 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-12-03T05:01:29,981 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T05:01:29,982 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K 2024-12-03T05:01:29,982 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-03T05:01:29,983 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025., 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T05:01:29,983 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T05:01:29,983 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c411983ff3bea3ce63a0c6f49ca7b025:info 2024-12-03T05:01:29,984 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37267 {}] assignment.AssignmentManager(1363): Split request from 32beccd76b35,36269,1733202063527, parent={ENCODED => c411983ff3bea3ce63a0c6f49ca7b025, NAME => 'TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-03T05:01:29,989 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37267 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=32beccd76b35,36269,1733202063527 2024-12-03T05:01:29,993 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37267 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=c411983ff3bea3ce63a0c6f49ca7b025, daughterA=d18ac3340fdcc16022f867385a7942da, daughterB=124beb052e88936506b4d7691f51b7dd 2024-12-03T05:01:29,994 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=c411983ff3bea3ce63a0c6f49ca7b025, daughterA=d18ac3340fdcc16022f867385a7942da, daughterB=124beb052e88936506b4d7691f51b7dd 2024-12-03T05:01:29,994 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=c411983ff3bea3ce63a0c6f49ca7b025, daughterA=d18ac3340fdcc16022f867385a7942da, daughterB=124beb052e88936506b4d7691f51b7dd 2024-12-03T05:01:29,994 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=c411983ff3bea3ce63a0c6f49ca7b025, daughterA=d18ac3340fdcc16022f867385a7942da, daughterB=124beb052e88936506b4d7691f51b7dd 2024-12-03T05:01:30,002 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c411983ff3bea3ce63a0c6f49ca7b025, UNASSIGN}] 2024-12-03T05:01:30,003 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c411983ff3bea3ce63a0c6f49ca7b025, UNASSIGN 2024-12-03T05:01:30,005 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=c411983ff3bea3ce63a0c6f49ca7b025, regionState=CLOSING, regionLocation=32beccd76b35,36269,1733202063527 2024-12-03T05:01:30,007 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c411983ff3bea3ce63a0c6f49ca7b025, UNASSIGN because future has completed 2024-12-03T05:01:30,007 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-03T05:01:30,008 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure c411983ff3bea3ce63a0c6f49ca7b025, server=32beccd76b35,36269,1733202063527}] 2024-12-03T05:01:30,165 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close c411983ff3bea3ce63a0c6f49ca7b025 2024-12-03T05:01:30,165 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-03T05:01:30,165 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing c411983ff3bea3ce63a0c6f49ca7b025, disabling compactions & flushes 2024-12-03T05:01:30,166 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025. 2024-12-03T05:01:30,166 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025. 2024-12-03T05:01:30,166 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025. after waiting 0 ms 2024-12-03T05:01:30,166 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025. 
2024-12-03T05:01:30,166 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing c411983ff3bea3ce63a0c6f49ca7b025 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-03T05:01:30,170 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/.tmp/info/8bb8a556f2bd479db428aa48b6543d77 is 1080, key is row0066/info:/1733202089920/Put/seqid=0 2024-12-03T05:01:30,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741844_1020 (size=16817) 2024-12-03T05:01:30,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741844_1020 (size=16817) 2024-12-03T05:01:30,178 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/.tmp/info/8bb8a556f2bd479db428aa48b6543d77 2024-12-03T05:01:30,186 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/.tmp/info/8bb8a556f2bd479db428aa48b6543d77 as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/8bb8a556f2bd479db428aa48b6543d77 2024-12-03T05:01:30,197 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/8bb8a556f2bd479db428aa48b6543d77, entries=11, sequenceid=97, filesize=16.4 K 2024-12-03T05:01:30,198 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=0 B/0 for c411983ff3bea3ce63a0c6f49ca7b025 in 32ms, sequenceid=97, compaction requested=false 2024-12-03T05:01:30,199 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/509b6de8bc374dffb2647c5178f8c1ad, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/5a3cf9c5efa145ad8d31a88329b03cbe, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/79ef955e1fb44b8baf11fe85820a808a, 
hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/01838329869549659381ca2bee30172f, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/c0a5b491e61b4df6950fad0b88553e44, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/aecebc16a5a74d3ab5f2f01b440c42e4] to archive 2024-12-03T05:01:30,200 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-03T05:01:30,218 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/509b6de8bc374dffb2647c5178f8c1ad to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/509b6de8bc374dffb2647c5178f8c1ad 2024-12-03T05:01:30,220 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/5a3cf9c5efa145ad8d31a88329b03cbe to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/5a3cf9c5efa145ad8d31a88329b03cbe 2024-12-03T05:01:30,223 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/79ef955e1fb44b8baf11fe85820a808a to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/79ef955e1fb44b8baf11fe85820a808a 2024-12-03T05:01:30,226 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/01838329869549659381ca2bee30172f to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/01838329869549659381ca2bee30172f 2024-12-03T05:01:30,227 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/c0a5b491e61b4df6950fad0b88553e44 to 
hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/c0a5b491e61b4df6950fad0b88553e44 2024-12-03T05:01:30,228 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/aecebc16a5a74d3ab5f2f01b440c42e4 to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/aecebc16a5a74d3ab5f2f01b440c42e4 2024-12-03T05:01:30,235 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/recovered.edits/100.seqid, newMaxSeqId=100, maxSeqId=1 2024-12-03T05:01:30,237 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025. 2024-12-03T05:01:30,237 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for c411983ff3bea3ce63a0c6f49ca7b025: Waiting for close lock at 1733202090165Running coprocessor pre-close hooks at 1733202090165Disabling compacts and flushes for region at 1733202090165Disabling writes for close at 1733202090166 (+1 ms)Obtaining lock to block concurrent updates at 1733202090166Preparing flush snapshotting stores in c411983ff3bea3ce63a0c6f49ca7b025 at 1733202090166Finished memstore snapshotting TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025., syncing WAL and waiting on mvcc, flushsize=dataSize=11836, getHeapSize=12912, getOffHeapSize=0, getCellsCount=11 at 1733202090166Flushing stores of TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025. 
at 1733202090167 (+1 ms)Flushing c411983ff3bea3ce63a0c6f49ca7b025/info: creating writer at 1733202090167Flushing c411983ff3bea3ce63a0c6f49ca7b025/info: appending metadata at 1733202090170 (+3 ms)Flushing c411983ff3bea3ce63a0c6f49ca7b025/info: closing flushed file at 1733202090170Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4f2a7699: reopening flushed file at 1733202090185 (+15 ms)Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=0 B/0 for c411983ff3bea3ce63a0c6f49ca7b025 in 32ms, sequenceid=97, compaction requested=false at 1733202090198 (+13 ms)Writing region close event to WAL at 1733202090231 (+33 ms)Running coprocessor post-close hooks at 1733202090237 (+6 ms)Closed at 1733202090237 2024-12-03T05:01:30,240 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed c411983ff3bea3ce63a0c6f49ca7b025 2024-12-03T05:01:30,240 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=c411983ff3bea3ce63a0c6f49ca7b025, regionState=CLOSED 2024-12-03T05:01:30,243 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure c411983ff3bea3ce63a0c6f49ca7b025, server=32beccd76b35,36269,1733202063527 because future has completed 2024-12-03T05:01:30,247 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-12-03T05:01:30,247 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure c411983ff3bea3ce63a0c6f49ca7b025, server=32beccd76b35,36269,1733202063527 in 236 msec 2024-12-03T05:01:30,251 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-03T05:01:30,251 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c411983ff3bea3ce63a0c6f49ca7b025, UNASSIGN in 245 msec 2024-12-03T05:01:30,265 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:01:30,268 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 2 storefiles, region=c411983ff3bea3ce63a0c6f49ca7b025, threads=2 2024-12-03T05:01:30,271 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/1c5b65c0b85d45fda44c90652b16e4ee for region: c411983ff3bea3ce63a0c6f49ca7b025 2024-12-03T05:01:30,271 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/8bb8a556f2bd479db428aa48b6543d77 for region: c411983ff3bea3ce63a0c6f49ca7b025 2024-12-03T05:01:30,281 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for 
hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/8bb8a556f2bd479db428aa48b6543d77, top=true 2024-12-03T05:01:30,294 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/TestLogRolling-testLogRolling=c411983ff3bea3ce63a0c6f49ca7b025-8bb8a556f2bd479db428aa48b6543d77 for child: 124beb052e88936506b4d7691f51b7dd, parent: c411983ff3bea3ce63a0c6f49ca7b025 2024-12-03T05:01:30,294 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/8bb8a556f2bd479db428aa48b6543d77 for region: c411983ff3bea3ce63a0c6f49ca7b025 2024-12-03T05:01:30,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741845_1021 (size=27) 2024-12-03T05:01:30,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741845_1021 (size=27) 2024-12-03T05:01:30,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741846_1022 (size=27) 2024-12-03T05:01:30,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741846_1022 (size=27) 2024-12-03T05:01:30,308 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/1c5b65c0b85d45fda44c90652b16e4ee for region: c411983ff3bea3ce63a0c6f49ca7b025 2024-12-03T05:01:30,310 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region c411983ff3bea3ce63a0c6f49ca7b025 Daughter A: [hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/d18ac3340fdcc16022f867385a7942da/info/1c5b65c0b85d45fda44c90652b16e4ee.c411983ff3bea3ce63a0c6f49ca7b025] storefiles, Daughter B: [hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/1c5b65c0b85d45fda44c90652b16e4ee.c411983ff3bea3ce63a0c6f49ca7b025, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/TestLogRolling-testLogRolling=c411983ff3bea3ce63a0c6f49ca7b025-8bb8a556f2bd479db428aa48b6543d77] storefiles. 
2024-12-03T05:01:30,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741847_1023 (size=71) 2024-12-03T05:01:30,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741847_1023 (size=71) 2024-12-03T05:01:30,322 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:01:30,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741848_1024 (size=71) 2024-12-03T05:01:30,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741848_1024 (size=71) 2024-12-03T05:01:30,335 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:01:30,347 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/d18ac3340fdcc16022f867385a7942da/recovered.edits/100.seqid, newMaxSeqId=100, maxSeqId=-1 2024-12-03T05:01:30,350 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/recovered.edits/100.seqid, newMaxSeqId=100, maxSeqId=-1 2024-12-03T05:01:30,353 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733202090353"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733202090353"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733202090353"}]},"ts":"1733202090353"} 2024-12-03T05:01:30,353 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733202089989.d18ac3340fdcc16022f867385a7942da.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733202090353"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733202090353"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733202090353"}]},"ts":"1733202090353"} 2024-12-03T05:01:30,354 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733202090353"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733202090353"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733202090353"}]},"ts":"1733202090353"} 2024-12-03T05:01:30,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:30,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:30,385 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d18ac3340fdcc16022f867385a7942da, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=124beb052e88936506b4d7691f51b7dd, ASSIGN}] 2024-12-03T05:01:30,388 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d18ac3340fdcc16022f867385a7942da, ASSIGN 2024-12-03T05:01:30,388 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=124beb052e88936506b4d7691f51b7dd, ASSIGN 2024-12-03T05:01:30,389 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d18ac3340fdcc16022f867385a7942da, ASSIGN; state=SPLITTING_NEW, location=32beccd76b35,36269,1733202063527; forceNewPlan=false, retain=false 2024-12-03T05:01:30,389 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=124beb052e88936506b4d7691f51b7dd, ASSIGN; state=SPLITTING_NEW, location=32beccd76b35,36269,1733202063527; forceNewPlan=false, retain=false 2024-12-03T05:01:30,540 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=124beb052e88936506b4d7691f51b7dd, regionState=OPENING, regionLocation=32beccd76b35,36269,1733202063527 2024-12-03T05:01:30,540 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=d18ac3340fdcc16022f867385a7942da, regionState=OPENING, regionLocation=32beccd76b35,36269,1733202063527 2024-12-03T05:01:30,543 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=124beb052e88936506b4d7691f51b7dd, ASSIGN because future has completed 2024-12-03T05:01:30,544 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 124beb052e88936506b4d7691f51b7dd, 
server=32beccd76b35,36269,1733202063527}] 2024-12-03T05:01:30,545 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d18ac3340fdcc16022f867385a7942da, ASSIGN because future has completed 2024-12-03T05:01:30,546 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure d18ac3340fdcc16022f867385a7942da, server=32beccd76b35,36269,1733202063527}] 2024-12-03T05:01:30,702 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd. 2024-12-03T05:01:30,702 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 124beb052e88936506b4d7691f51b7dd, NAME => 'TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-03T05:01:30,702 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 124beb052e88936506b4d7691f51b7dd 2024-12-03T05:01:30,703 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T05:01:30,703 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 124beb052e88936506b4d7691f51b7dd 2024-12-03T05:01:30,703 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 124beb052e88936506b4d7691f51b7dd 2024-12-03T05:01:30,704 INFO [StoreOpener-124beb052e88936506b4d7691f51b7dd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 124beb052e88936506b4d7691f51b7dd 2024-12-03T05:01:30,705 INFO [StoreOpener-124beb052e88936506b4d7691f51b7dd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 124beb052e88936506b4d7691f51b7dd columnFamilyName info 2024-12-03T05:01:30,705 DEBUG [StoreOpener-124beb052e88936506b4d7691f51b7dd-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:01:30,730 DEBUG [StoreOpener-124beb052e88936506b4d7691f51b7dd-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/1c5b65c0b85d45fda44c90652b16e4ee.c411983ff3bea3ce63a0c6f49ca7b025->hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/1c5b65c0b85d45fda44c90652b16e4ee-top 2024-12-03T05:01:30,737 DEBUG [StoreOpener-124beb052e88936506b4d7691f51b7dd-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/TestLogRolling-testLogRolling=c411983ff3bea3ce63a0c6f49ca7b025-8bb8a556f2bd479db428aa48b6543d77 2024-12-03T05:01:30,737 INFO [StoreOpener-124beb052e88936506b4d7691f51b7dd-1 {}] regionserver.HStore(327): Store=124beb052e88936506b4d7691f51b7dd/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T05:01:30,738 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 124beb052e88936506b4d7691f51b7dd 2024-12-03T05:01:30,738 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd 2024-12-03T05:01:30,741 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd 2024-12-03T05:01:30,742 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 124beb052e88936506b4d7691f51b7dd 2024-12-03T05:01:30,742 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 124beb052e88936506b4d7691f51b7dd 2024-12-03T05:01:30,748 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 124beb052e88936506b4d7691f51b7dd 2024-12-03T05:01:30,749 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 124beb052e88936506b4d7691f51b7dd; next sequenceid=101; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=830909, jitterRate=0.05655544996261597}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T05:01:30,749 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 124beb052e88936506b4d7691f51b7dd 2024-12-03T05:01:30,750 DEBUG 
[RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 124beb052e88936506b4d7691f51b7dd: Running coprocessor pre-open hook at 1733202090703Writing region info on filesystem at 1733202090703Initializing all the Stores at 1733202090704 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733202090704Cleaning up temporary data from old regions at 1733202090742 (+38 ms)Running coprocessor post-open hooks at 1733202090749 (+7 ms)Region opened successfully at 1733202090750 (+1 ms) 2024-12-03T05:01:30,752 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., pid=12, masterSystemTime=1733202090697 2024-12-03T05:01:30,752 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 124beb052e88936506b4d7691f51b7dd:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T05:01:30,752 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T05:01:30,752 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-03T05:01:30,754 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd. 2024-12-03T05:01:30,754 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1541): 124beb052e88936506b4d7691f51b7dd/info is initiating minor compaction (all files) 2024-12-03T05:01:30,754 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 124beb052e88936506b4d7691f51b7dd/info in TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd. 
2024-12-03T05:01:30,754 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/1c5b65c0b85d45fda44c90652b16e4ee.c411983ff3bea3ce63a0c6f49ca7b025->hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/1c5b65c0b85d45fda44c90652b16e4ee-top, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/TestLogRolling-testLogRolling=c411983ff3bea3ce63a0c6f49ca7b025-8bb8a556f2bd479db428aa48b6543d77] into tmpdir=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp, totalSize=90.0 K 2024-12-03T05:01:30,756 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd. 2024-12-03T05:01:30,756 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd. 2024-12-03T05:01:30,756 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733202089989.d18ac3340fdcc16022f867385a7942da. 2024-12-03T05:01:30,756 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => d18ac3340fdcc16022f867385a7942da, NAME => 'TestLogRolling-testLogRolling,,1733202089989.d18ac3340fdcc16022f867385a7942da.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-03T05:01:30,757 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling d18ac3340fdcc16022f867385a7942da 2024-12-03T05:01:30,757 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733202089989.d18ac3340fdcc16022f867385a7942da.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T05:01:30,757 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1c5b65c0b85d45fda44c90652b16e4ee.c411983ff3bea3ce63a0c6f49ca7b025, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1733202075660 2024-12-03T05:01:30,757 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for d18ac3340fdcc16022f867385a7942da 2024-12-03T05:01:30,757 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for d18ac3340fdcc16022f867385a7942da 2024-12-03T05:01:30,757 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta 
row=124beb052e88936506b4d7691f51b7dd, regionState=OPEN, openSeqNum=101, regionLocation=32beccd76b35,36269,1733202063527 2024-12-03T05:01:30,758 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=c411983ff3bea3ce63a0c6f49ca7b025-8bb8a556f2bd479db428aa48b6543d77, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1733202089920 2024-12-03T05:01:30,761 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36269 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-12-03T05:01:30,761 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 2024-12-03T05:01:30,761 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 124beb052e88936506b4d7691f51b7dd, server=32beccd76b35,36269,1733202063527 because future has completed 2024-12-03T05:01:30,761 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-12-03T05:01:30,774 INFO [StoreOpener-d18ac3340fdcc16022f867385a7942da-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d18ac3340fdcc16022f867385a7942da 2024-12-03T05:01:30,783 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-12-03T05:01:30,783 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 124beb052e88936506b4d7691f51b7dd, server=32beccd76b35,36269,1733202063527 in 233 msec 2024-12-03T05:01:30,786 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=124beb052e88936506b4d7691f51b7dd, ASSIGN in 399 msec 2024-12-03T05:01:30,789 INFO [StoreOpener-d18ac3340fdcc16022f867385a7942da-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d18ac3340fdcc16022f867385a7942da columnFamilyName info 2024-12-03T05:01:30,789 DEBUG [StoreOpener-d18ac3340fdcc16022f867385a7942da-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:01:30,798 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740/.tmp/info/e9f6db1eab3540f6bc06c6ad50d7ab50 is 193, key is 
TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd./info:regioninfo/1733202090757/Put/seqid=0 2024-12-03T05:01:30,817 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 124beb052e88936506b4d7691f51b7dd#info#compaction#63 average throughput is 5.13 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T05:01:30,817 DEBUG [StoreOpener-d18ac3340fdcc16022f867385a7942da-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/d18ac3340fdcc16022f867385a7942da/info/1c5b65c0b85d45fda44c90652b16e4ee.c411983ff3bea3ce63a0c6f49ca7b025->hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/1c5b65c0b85d45fda44c90652b16e4ee-bottom 2024-12-03T05:01:30,817 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/3ee9ca291eaf402b865b12fa6512b415 is 1080, key is row0062/info:/1733202087913/Put/seqid=0 2024-12-03T05:01:30,817 INFO [StoreOpener-d18ac3340fdcc16022f867385a7942da-1 {}] regionserver.HStore(327): Store=d18ac3340fdcc16022f867385a7942da/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T05:01:30,818 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for d18ac3340fdcc16022f867385a7942da 2024-12-03T05:01:30,821 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/d18ac3340fdcc16022f867385a7942da 2024-12-03T05:01:30,824 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/d18ac3340fdcc16022f867385a7942da 2024-12-03T05:01:30,824 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for d18ac3340fdcc16022f867385a7942da 2024-12-03T05:01:30,824 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for d18ac3340fdcc16022f867385a7942da 2024-12-03T05:01:30,827 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for d18ac3340fdcc16022f867385a7942da 2024-12-03T05:01:30,828 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened d18ac3340fdcc16022f867385a7942da; next sequenceid=101; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=801595, jitterRate=0.019281402230262756}}}, 
FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T05:01:30,828 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d18ac3340fdcc16022f867385a7942da 2024-12-03T05:01:30,828 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for d18ac3340fdcc16022f867385a7942da: Running coprocessor pre-open hook at 1733202090757Writing region info on filesystem at 1733202090757Initializing all the Stores at 1733202090760 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733202090760Cleaning up temporary data from old regions at 1733202090824 (+64 ms)Running coprocessor post-open hooks at 1733202090828 (+4 ms)Region opened successfully at 1733202090828 2024-12-03T05:01:30,838 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733202089989.d18ac3340fdcc16022f867385a7942da., pid=13, masterSystemTime=1733202090697 2024-12-03T05:01:30,838 DEBUG [RS:0;32beccd76b35:36269-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-03T05:01:30,838 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store d18ac3340fdcc16022f867385a7942da:info, priority=-2147483648, current under compaction store size is 2 2024-12-03T05:01:30,838 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T05:01:30,839 INFO [RS:0;32beccd76b35:36269-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733202089989.d18ac3340fdcc16022f867385a7942da. 2024-12-03T05:01:30,839 DEBUG [RS:0;32beccd76b35:36269-longCompactions-0 {}] regionserver.HStore(1541): d18ac3340fdcc16022f867385a7942da/info is initiating minor compaction (all files) 2024-12-03T05:01:30,839 INFO [RS:0;32beccd76b35:36269-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d18ac3340fdcc16022f867385a7942da/info in TestLogRolling-testLogRolling,,1733202089989.d18ac3340fdcc16022f867385a7942da. 
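The records above show the daughter regions d18ac3340fdcc16022f867385a7942da and 124beb052e88936506b4d7691f51b7dd coming online after the split of c411983ff3bea3ce63a0c6f49ca7b025, with minor compactions queued ("Because: Opening Region") to rewrite the store files they reference from the parent. As a hedged illustration only, not code from this test: the same kind of compaction can be requested and polled through the public Admin API; the table name is taken from the log, everything else here is assumed.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class WatchPostSplitCompaction {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.compact(table); // request a compaction of every region of the table
          // Poll until no region of the table reports an ongoing compaction.
          while (admin.getCompactionState(table) != CompactionState.NONE) {
            Thread.sleep(1000);
          }
        }
      }
    }

Admin.getCompactionState reports NONE once every region of the table, including freshly split daughters, has finished rewriting its store files.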
2024-12-03T05:01:30,840 INFO [RS:0;32beccd76b35:36269-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/d18ac3340fdcc16022f867385a7942da/info/1c5b65c0b85d45fda44c90652b16e4ee.c411983ff3bea3ce63a0c6f49ca7b025->hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/1c5b65c0b85d45fda44c90652b16e4ee-bottom] into tmpdir=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/d18ac3340fdcc16022f867385a7942da/.tmp, totalSize=73.6 K 2024-12-03T05:01:30,840 DEBUG [RS:0;32beccd76b35:36269-longCompactions-0 {}] compactions.Compactor(225): Compacting 1c5b65c0b85d45fda44c90652b16e4ee.c411983ff3bea3ce63a0c6f49ca7b025, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733202075660 2024-12-03T05:01:30,842 DEBUG [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733202089989.d18ac3340fdcc16022f867385a7942da. 2024-12-03T05:01:30,842 INFO [RS_OPEN_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733202089989.d18ac3340fdcc16022f867385a7942da. 2024-12-03T05:01:30,846 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=d18ac3340fdcc16022f867385a7942da, regionState=OPEN, openSeqNum=101, regionLocation=32beccd76b35,36269,1733202063527 2024-12-03T05:01:30,849 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure d18ac3340fdcc16022f867385a7942da, server=32beccd76b35,36269,1733202063527 because future has completed 2024-12-03T05:01:30,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741849_1025 (size=9882) 2024-12-03T05:01:30,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741849_1025 (size=9882) 2024-12-03T05:01:30,865 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740/.tmp/info/e9f6db1eab3540f6bc06c6ad50d7ab50 2024-12-03T05:01:30,876 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37267 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=32beccd76b35,36269,1733202063527, table=TestLogRolling-testLogRolling, region=d18ac3340fdcc16022f867385a7942da. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-03T05:01:30,892 INFO [RS:0;32beccd76b35:36269-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d18ac3340fdcc16022f867385a7942da#info#compaction#64 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T05:01:30,893 DEBUG [RS:0;32beccd76b35:36269-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/d18ac3340fdcc16022f867385a7942da/.tmp/info/972353c5a04944d0842d6a5713869d25 is 1080, key is row0001/info:/1733202075660/Put/seqid=0 2024-12-03T05:01:30,897 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-12-03T05:01:30,897 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure d18ac3340fdcc16022f867385a7942da, server=32beccd76b35,36269,1733202063527 in 346 msec 2024-12-03T05:01:30,901 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-12-03T05:01:30,901 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d18ac3340fdcc16022f867385a7942da, ASSIGN in 512 msec 2024-12-03T05:01:30,906 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=c411983ff3bea3ce63a0c6f49ca7b025, daughterA=d18ac3340fdcc16022f867385a7942da, daughterB=124beb052e88936506b4d7691f51b7dd in 912 msec 2024-12-03T05:01:30,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741850_1026 (size=21307) 2024-12-03T05:01:30,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741850_1026 (size=21307) 2024-12-03T05:01:30,964 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740/.tmp/ns/5bff3f72b6854866bce6c1b775f30316 is 43, key is default/ns:d/1733202065443/Put/seqid=0 2024-12-03T05:01:31,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741851_1027 (size=70862) 2024-12-03T05:01:31,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741851_1027 (size=70862) 2024-12-03T05:01:31,050 DEBUG [RS:0;32beccd76b35:36269-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/d18ac3340fdcc16022f867385a7942da/.tmp/info/972353c5a04944d0842d6a5713869d25 as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/d18ac3340fdcc16022f867385a7942da/info/972353c5a04944d0842d6a5713869d25 2024-12-03T05:01:31,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741852_1028 (size=5153) 2024-12-03T05:01:31,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741852_1028 (size=5153) 2024-12-03T05:01:31,073 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): 
Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740/.tmp/ns/5bff3f72b6854866bce6c1b775f30316 2024-12-03T05:01:31,078 INFO [RS:0;32beccd76b35:36269-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in d18ac3340fdcc16022f867385a7942da/info of d18ac3340fdcc16022f867385a7942da into 972353c5a04944d0842d6a5713869d25(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T05:01:31,079 DEBUG [RS:0;32beccd76b35:36269-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d18ac3340fdcc16022f867385a7942da: 2024-12-03T05:01:31,080 INFO [RS:0;32beccd76b35:36269-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733202089989.d18ac3340fdcc16022f867385a7942da., storeName=d18ac3340fdcc16022f867385a7942da/info, priority=15, startTime=1733202090838; duration=0sec 2024-12-03T05:01:31,080 DEBUG [RS:0;32beccd76b35:36269-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T05:01:31,080 DEBUG [RS:0;32beccd76b35:36269-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d18ac3340fdcc16022f867385a7942da:info 2024-12-03T05:01:31,132 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740/.tmp/table/4dbd3b8b56ef477caad6b613a095e23b is 65, key is TestLogRolling-testLogRolling/table:state/1733202066091/Put/seqid=0 2024-12-03T05:01:31,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741853_1029 (size=5340) 2024-12-03T05:01:31,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741853_1029 (size=5340) 2024-12-03T05:01:31,201 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740/.tmp/table/4dbd3b8b56ef477caad6b613a095e23b 2024-12-03T05:01:31,213 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740/.tmp/info/e9f6db1eab3540f6bc06c6ad50d7ab50 as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740/info/e9f6db1eab3540f6bc06c6ad50d7ab50 2024-12-03T05:01:31,237 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740/info/e9f6db1eab3540f6bc06c6ad50d7ab50, entries=30, sequenceid=17, filesize=9.7 K 2024-12-03T05:01:31,238 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740/.tmp/ns/5bff3f72b6854866bce6c1b775f30316 as 
hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740/ns/5bff3f72b6854866bce6c1b775f30316 2024-12-03T05:01:31,246 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740/ns/5bff3f72b6854866bce6c1b775f30316, entries=2, sequenceid=17, filesize=5.0 K 2024-12-03T05:01:31,248 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740/.tmp/table/4dbd3b8b56ef477caad6b613a095e23b as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740/table/4dbd3b8b56ef477caad6b613a095e23b 2024-12-03T05:01:31,259 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740/table/4dbd3b8b56ef477caad6b613a095e23b, entries=2, sequenceid=17, filesize=5.2 K 2024-12-03T05:01:31,260 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 499ms, sequenceid=17, compaction requested=false 2024-12-03T05:01:31,260 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-03T05:01:31,322 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/3ee9ca291eaf402b865b12fa6512b415 as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/3ee9ca291eaf402b865b12fa6512b415 2024-12-03T05:01:31,358 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 2 (all) file(s) in 124beb052e88936506b4d7691f51b7dd/info of 124beb052e88936506b4d7691f51b7dd into 3ee9ca291eaf402b865b12fa6512b415(size=20.8 K), total size for store is 20.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
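Shortly after this point the log becomes dominated by once-per-second WARN entries from Close-WAL-Writer-0: RecoverLeaseFSUtils keeps invoking DistributedFileSystem.isFileClosed reflectively for two WAL files under a previous test-data directory, and every attempt fails with "java.io.IOException: Filesystem closed" because the DFSClient behind that FileSystem instance has already been shut down. As a hedged sketch only, not the test's code (the WAL path is a placeholder argument here), the recover-then-poll pattern those stack traces describe looks roughly like this against the plain HDFS client API:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoveryProbe {
      public static void main(String[] args) throws Exception {
        // args[0] is assumed to be an hdfs:// path to a possibly still-open WAL file.
        Configuration conf = new Configuration();
        Path wal = new Path(args[0]);
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(new URI(args[0]), conf);
        boolean closed = dfs.recoverLease(wal); // ask the NameNode to start lease recovery
        while (!closed) {
          Thread.sleep(1000);                   // the log shows the same ~1 s cadence between attempts
          closed = dfs.isFileClosed(wal);       // the call that throws once the DFSClient is closed
        }
        System.out.println("Lease recovered and file closed: " + wal);
      }
    }

The polling can only succeed while the FileSystem handle is still open; once it has been closed, each isFileClosed call re-throws "Filesystem closed", which is why identical warnings recur for both files in the entries below.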
2024-12-03T05:01:31,358 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 124beb052e88936506b4d7691f51b7dd: 2024-12-03T05:01:31,358 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., storeName=124beb052e88936506b4d7691f51b7dd/info, priority=14, startTime=1733202090752; duration=0sec 2024-12-03T05:01:31,358 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T05:01:31,358 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 124beb052e88936506b4d7691f51b7dd:info 2024-12-03T05:01:31,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:01:31,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:32,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:32,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:01:33,363 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T05:01:33,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:33,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:34,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:34,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:35,238 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,238 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,238 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,238 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,239 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,239 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,239 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,239 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,263 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,263 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,263 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,264 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,264 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,264 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,271 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:35,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:35,786 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-03T05:01:35,790 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,791 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,791 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,791 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,792 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,792 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,793 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,793 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,833 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,833 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,833 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,834 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,834 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,834 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,844 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,844 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,844 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:35,847 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-03T05:01:36,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:01:36,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:37,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:37,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:01:38,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:38,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:39,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:01:39,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:39,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:41112 deadline: 1733202109989, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025. is not online on 32beccd76b35,36269,1733202063527 2024-12-03T05:01:39,990 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025., hostname=32beccd76b35,36269,1733202063527, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025., hostname=32beccd76b35,36269,1733202063527, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025. 
is not online on 32beccd76b35,36269,1733202063527 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T05:01:39,990 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025., hostname=32beccd76b35,36269,1733202063527, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025. is not online on 32beccd76b35,36269,1733202063527 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T05:01:39,990 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1733202065568.c411983ff3bea3ce63a0c6f49ca7b025., hostname=32beccd76b35,36269,1733202063527, seqNum=2 from cache 2024-12-03T05:01:40,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:40,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:01:41,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:41,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:42,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:01:42,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:43,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:43,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:01:44,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:44,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:45,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:01:45,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:45,509 INFO [master/32beccd76b35:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-03T05:01:45,509 INFO [master/32beccd76b35:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-03T05:01:46,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:46,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:47,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:47,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:48,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:48,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:49,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:49,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:50,361 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-12-03T05:01:50,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:50,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:51,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:51,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:52,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:52,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:53,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:53,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:54,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:54,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:55,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:55,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:56,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:56,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:57,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:57,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:58,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:58,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:59,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:01:59,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-03T05:02:00,135 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0077', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., hostname=32beccd76b35,36269,1733202063527, seqNum=101]
2024-12-03T05:02:00,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] regionserver.HRegion(8855): Flush requested on 124beb052e88936506b4d7691f51b7dd
2024-12-03T05:02:00,146 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 124beb052e88936506b4d7691f51b7dd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-03T05:02:00,151 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/9d28839a928045d68c094fb0a51d23e8 is 1080, key is row0077/info:/1733202120136/Put/seqid=0
2024-12-03T05:02:00,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741854_1030 (size=12509)
2024-12-03T05:02:00,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741854_1030 (size=12509)
2024-12-03T05:02:00,157 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=111 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/9d28839a928045d68c094fb0a51d23e8
2024-12-03T05:02:00,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/9d28839a928045d68c094fb0a51d23e8 as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/9d28839a928045d68c094fb0a51d23e8
2024-12-03T05:02:00,169 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/9d28839a928045d68c094fb0a51d23e8, entries=7, sequenceid=111, filesize=12.2 K
2024-12-03T05:02:00,170 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 124beb052e88936506b4d7691f51b7dd in 23ms, sequenceid=111, compaction requested=false
2024-12-03T05:02:00,170 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 124beb052e88936506b4d7691f51b7dd:
2024-12-03T05:02:00,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
...
11 more 2024-12-03T05:02:00,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:01,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:01,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:02:02,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] regionserver.HRegion(8855): Flush requested on 124beb052e88936506b4d7691f51b7dd 2024-12-03T05:02:02,172 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 124beb052e88936506b4d7691f51b7dd 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-03T05:02:02,176 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/f1f71801ebe24231b2cc9d1dbd46e4da is 1080, key is row0084/info:/1733202120147/Put/seqid=0 2024-12-03T05:02:02,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741855_1031 (size=20064) 2024-12-03T05:02:02,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741855_1031 (size=20064) 2024-12-03T05:02:02,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=124beb052e88936506b4d7691f51b7dd, server=32beccd76b35,36269,1733202063527 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
  at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
2024-12-03T05:02:02,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:41112 deadline: 1733202132200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=124beb052e88936506b4d7691f51b7dd, server=32beccd76b35,36269,1733202063527
2024-12-03T05:02:02,201 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., hostname=32beccd76b35,36269,1733202063527, seqNum=101 , the old value is region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., hostname=32beccd76b35,36269,1733202063527, seqNum=101, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=124beb052e88936506b4d7691f51b7dd, server=32beccd76b35,36269,1733202063527
  at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
  at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
  at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
  at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
  at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
  at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
  at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
  at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
  at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
  at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-03T05:02:02,201 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., hostname=32beccd76b35,36269,1733202063527, seqNum=101 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=124beb052e88936506b4d7691f51b7dd, server=32beccd76b35,36269,1733202063527
  at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
  at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
  at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
  at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
  at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
  at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
  at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
  at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
  at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
  at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
  at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-03T05:02:02,201 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., hostname=32beccd76b35,36269,1733202063527, seqNum=101 because the exception is null or not the one we care about
2024-12-03T05:02:02,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132
java.lang.reflect.InvocationTargetException: null
  at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
  ... 11 more
2024-12-03T05:02:02,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta
2024-12-03T05:02:02,583 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/f1f71801ebe24231b2cc9d1dbd46e4da
2024-12-03T05:02:02,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/f1f71801ebe24231b2cc9d1dbd46e4da as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/f1f71801ebe24231b2cc9d1dbd46e4da
2024-12-03T05:02:02,594 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/f1f71801ebe24231b2cc9d1dbd46e4da, entries=14, sequenceid=128, filesize=19.6 K
2024-12-03T05:02:02,595 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=15.76 KB/16140 for 124beb052e88936506b4d7691f51b7dd in 423ms, sequenceid=128, compaction requested=true
2024-12-03T05:02:02,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 124beb052e88936506b4d7691f51b7dd:
2024-12-03T05:02:02,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 124beb052e88936506b4d7691f51b7dd:info, priority=-2147483648, current under compaction store size is 1
2024-12-03T05:02:02,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T05:02:02,595 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-03T05:02:02,596 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 53880 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-03T05:02:02,596 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1541): 124beb052e88936506b4d7691f51b7dd/info is initiating minor compaction (all files)
2024-12-03T05:02:02,596 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 124beb052e88936506b4d7691f51b7dd/info in TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.
2024-12-03T05:02:02,596 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/3ee9ca291eaf402b865b12fa6512b415, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/9d28839a928045d68c094fb0a51d23e8, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/f1f71801ebe24231b2cc9d1dbd46e4da] into tmpdir=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp, totalSize=52.6 K
2024-12-03T05:02:02,597 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3ee9ca291eaf402b865b12fa6512b415, keycount=15, bloomtype=ROW, size=20.8 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1733202087913
2024-12-03T05:02:02,597 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9d28839a928045d68c094fb0a51d23e8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=111, earliestPutTs=1733202120136
2024-12-03T05:02:02,598 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting f1f71801ebe24231b2cc9d1dbd46e4da, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1733202120147
2024-12-03T05:02:02,608 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 124beb052e88936506b4d7691f51b7dd#info#compaction#69 average throughput is 36.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-03T05:02:02,609 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/c6ee57c0efa64336930d78561c2c7dab is 1080, key is row0062/info:/1733202087913/Put/seqid=0
2024-12-03T05:02:02,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741856_1032 (size=44066)
2024-12-03T05:02:02,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741856_1032 (size=44066)
2024-12-03T05:02:02,618 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/c6ee57c0efa64336930d78561c2c7dab as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/c6ee57c0efa64336930d78561c2c7dab
2024-12-03T05:02:02,623 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 124beb052e88936506b4d7691f51b7dd/info of 124beb052e88936506b4d7691f51b7dd into c6ee57c0efa64336930d78561c2c7dab(size=43.0 K), total size for store is 43.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-03T05:02:02,623 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 124beb052e88936506b4d7691f51b7dd:
2024-12-03T05:02:02,623 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., storeName=124beb052e88936506b4d7691f51b7dd/info, priority=13, startTime=1733202122595; duration=0sec
2024-12-03T05:02:02,623 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T05:02:02,623 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 124beb052e88936506b4d7691f51b7dd:info
2024-12-03T05:02:03,363 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-03T05:02:03,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132
2024-12-03T05:02:03,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta
2024-12-03T05:02:03,445 DEBUG [master/32beccd76b35:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=3, created chunk count=9, reused chunk count=58, reuseRatio=86.57%
2024-12-03T05:02:03,446 DEBUG [master/32beccd76b35:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0
2024-12-03T05:02:04,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132
2024-12-03T05:02:04,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta
2024-12-03T05:02:05,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132
2024-12-03T05:02:05,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta
2024-12-03T05:02:06,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132
2024-12-03T05:02:06,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta
2024-12-03T05:02:07,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132
2024-12-03T05:02:07,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta
2024-12-03T05:02:08,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132
2024-12-03T05:02:08,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta
2024-12-03T05:02:09,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132
2024-12-03T05:02:09,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta
2024-12-03T05:02:10,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132
2024-12-03T05:02:10,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta
2024-12-03T05:02:10,590 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:10,590 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:10,590 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:10,591 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:10,591 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:10,591 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:10,592 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:10,592 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:10,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:10,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:10,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:10,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:10,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:10,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:10,632 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:10,632 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:10,632 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:10,634 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:11,146 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-03T05:02:11,147 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:11,147 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:11,147 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:11,147 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:11,148 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:11,148 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:11,149 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:11,149 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:11,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:11,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:11,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:11,170 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:11,170 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:11,171 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:11,174 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:11,175 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:11,175 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:11,177 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-03T05:02:11,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:11,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:12,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] regionserver.HRegion(8855): Flush requested on 124beb052e88936506b4d7691f51b7dd 2024-12-03T05:02:12,250 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 124beb052e88936506b4d7691f51b7dd 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-12-03T05:02:12,257 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/e43d5487a5c7493e8c1d2155d12d9a7c is 1080, key is row0098/info:/1733202122173/Put/seqid=0 2024-12-03T05:02:12,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741857_1033 (size=22238) 2024-12-03T05:02:12,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741857_1033 (size=22238) 2024-12-03T05:02:12,306 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=148 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/e43d5487a5c7493e8c1d2155d12d9a7c 2024-12-03T05:02:12,317 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/e43d5487a5c7493e8c1d2155d12d9a7c as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/e43d5487a5c7493e8c1d2155d12d9a7c 2024-12-03T05:02:12,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=124beb052e88936506b4d7691f51b7dd, server=32beccd76b35,36269,1733202063527 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-03T05:02:12,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:41112 deadline: 1733202142324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=124beb052e88936506b4d7691f51b7dd, server=32beccd76b35,36269,1733202063527 2024-12-03T05:02:12,326 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., hostname=32beccd76b35,36269,1733202063527, seqNum=101 , the old value is region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., hostname=32beccd76b35,36269,1733202063527, seqNum=101, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=124beb052e88936506b4d7691f51b7dd, server=32beccd76b35,36269,1733202063527 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T05:02:12,326 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/e43d5487a5c7493e8c1d2155d12d9a7c, entries=16, sequenceid=148, filesize=21.7 K 2024-12-03T05:02:12,326 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., hostname=32beccd76b35,36269,1733202063527, seqNum=101 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=124beb052e88936506b4d7691f51b7dd, server=32beccd76b35,36269,1733202063527 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T05:02:12,326 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., hostname=32beccd76b35,36269,1733202063527, seqNum=101 because the exception is null or not the one we care about 2024-12-03T05:02:12,327 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=13.66 KB/13988 for 124beb052e88936506b4d7691f51b7dd in 77ms, sequenceid=148, compaction requested=false 2024-12-03T05:02:12,327 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 124beb052e88936506b4d7691f51b7dd: 2024-12-03T05:02:12,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:12,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:13,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:13,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:14,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:14,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:15,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:15,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:02:15,703 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 124beb052e88936506b4d7691f51b7dd, had cached 0 bytes from a total of 66304 2024-12-03T05:02:15,757 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region d18ac3340fdcc16022f867385a7942da, had cached 0 bytes from a total of 70862 2024-12-03T05:02:16,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:16,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:17,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:17,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:18,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:18,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:19,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:19,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:20,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:20,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:21,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:21,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:22,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] regionserver.HRegion(8855): Flush requested on 124beb052e88936506b4d7691f51b7dd 2024-12-03T05:02:22,353 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 124beb052e88936506b4d7691f51b7dd 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-03T05:02:22,375 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/3fe3677a53ec47248aab85102c0d742a is 1080, key is row0114/info:/1733202132253/Put/seqid=0 2024-12-03T05:02:22,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741858_1034 (size=20078) 2024-12-03T05:02:22,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741858_1034 (size=20078) 2024-12-03T05:02:22,417 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/3fe3677a53ec47248aab85102c0d742a 2024-12-03T05:02:22,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:22,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-03T05:02:22,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/3fe3677a53ec47248aab85102c0d742a as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/3fe3677a53ec47248aab85102c0d742a
2024-12-03T05:02:22,449 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/3fe3677a53ec47248aab85102c0d742a, entries=14, sequenceid=165, filesize=19.6 K
2024-12-03T05:02:22,450 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=1.05 KB/1076 for 124beb052e88936506b4d7691f51b7dd in 98ms, sequenceid=165, compaction requested=true
2024-12-03T05:02:22,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 124beb052e88936506b4d7691f51b7dd:
2024-12-03T05:02:22,451 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-03T05:02:22,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 124beb052e88936506b4d7691f51b7dd:info, priority=-2147483648, current under compaction store size is 1
2024-12-03T05:02:22,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T05:02:22,452 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 86382 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-03T05:02:22,452 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1541): 124beb052e88936506b4d7691f51b7dd/info is initiating minor compaction (all files)
2024-12-03T05:02:22,452 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 124beb052e88936506b4d7691f51b7dd/info in TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.
2024-12-03T05:02:22,452 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/c6ee57c0efa64336930d78561c2c7dab, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/e43d5487a5c7493e8c1d2155d12d9a7c, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/3fe3677a53ec47248aab85102c0d742a] into tmpdir=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp, totalSize=84.4 K
2024-12-03T05:02:22,453 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting c6ee57c0efa64336930d78561c2c7dab, keycount=36, bloomtype=ROW, size=43.0 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1733202087913
2024-12-03T05:02:22,454 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting e43d5487a5c7493e8c1d2155d12d9a7c, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=148, earliestPutTs=1733202122173
2024-12-03T05:02:22,454 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3fe3677a53ec47248aab85102c0d742a, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1733202132253
2024-12-03T05:02:22,484 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 124beb052e88936506b4d7691f51b7dd#info#compaction#72 average throughput is 33.86 MB/second, slept 0 time(s) and total slept time is 0 ms.
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T05:02:22,490 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/109be95eed2d4335b879606e3cbd6351 is 1080, key is row0062/info:/1733202087913/Put/seqid=0 2024-12-03T05:02:22,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741859_1035 (size=76649) 2024-12-03T05:02:22,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741859_1035 (size=76649) 2024-12-03T05:02:22,510 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/109be95eed2d4335b879606e3cbd6351 as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/109be95eed2d4335b879606e3cbd6351 2024-12-03T05:02:22,519 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 124beb052e88936506b4d7691f51b7dd/info of 124beb052e88936506b4d7691f51b7dd into 109be95eed2d4335b879606e3cbd6351(size=74.9 K), total size for store is 74.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T05:02:22,519 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 124beb052e88936506b4d7691f51b7dd: 2024-12-03T05:02:22,519 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., storeName=124beb052e88936506b4d7691f51b7dd/info, priority=13, startTime=1733202142451; duration=0sec 2024-12-03T05:02:22,519 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T05:02:22,519 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 124beb052e88936506b4d7691f51b7dd:info 2024-12-03T05:02:23,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:23,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:24,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] regionserver.HRegion(8855): Flush requested on 124beb052e88936506b4d7691f51b7dd 2024-12-03T05:02:24,366 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 124beb052e88936506b4d7691f51b7dd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T05:02:24,370 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/d6fb0115da6c4b46abda54480379392b is 1080, key is row0128/info:/1733202142354/Put/seqid=0 2024-12-03T05:02:24,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741860_1036 (size=12516) 2024-12-03T05:02:24,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741860_1036 (size=12516) 2024-12-03T05:02:24,381 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/d6fb0115da6c4b46abda54480379392b 2024-12-03T05:02:24,387 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/d6fb0115da6c4b46abda54480379392b as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/d6fb0115da6c4b46abda54480379392b 2024-12-03T05:02:24,394 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/d6fb0115da6c4b46abda54480379392b, entries=7, sequenceid=176, filesize=12.2 K 2024-12-03T05:02:24,397 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=17.86 KB/18292 for 124beb052e88936506b4d7691f51b7dd in 30ms, sequenceid=176, compaction requested=false 2024-12-03T05:02:24,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 124beb052e88936506b4d7691f51b7dd: 2024-12-03T05:02:24,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] regionserver.HRegion(8855): Flush requested on 124beb052e88936506b4d7691f51b7dd 2024-12-03T05:02:24,399 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 124beb052e88936506b4d7691f51b7dd 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 
2024-12-03T05:02:24,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:24,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:24,425 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/bf3312352cc74e438209571496a71b0d is 1080, key is row0135/info:/1733202144367/Put/seqid=0 2024-12-03T05:02:24,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741861_1037 (size=24394) 2024-12-03T05:02:24,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741861_1037 (size=24394) 2024-12-03T05:02:24,432 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/bf3312352cc74e438209571496a71b0d 2024-12-03T05:02:24,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/bf3312352cc74e438209571496a71b0d as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/bf3312352cc74e438209571496a71b0d 2024-12-03T05:02:24,442 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/bf3312352cc74e438209571496a71b0d, entries=18, sequenceid=197, filesize=23.8 K 2024-12-03T05:02:24,443 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=8.41 KB/8608 for 124beb052e88936506b4d7691f51b7dd in 43ms, sequenceid=197, compaction requested=true 2024-12-03T05:02:24,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 124beb052e88936506b4d7691f51b7dd: 2024-12-03T05:02:24,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for 
store 124beb052e88936506b4d7691f51b7dd:info, priority=-2147483648, current under compaction store size is 1
2024-12-03T05:02:24,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T05:02:24,443 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-03T05:02:24,444 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 113559 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-03T05:02:24,444 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1541): 124beb052e88936506b4d7691f51b7dd/info is initiating minor compaction (all files)
2024-12-03T05:02:24,444 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 124beb052e88936506b4d7691f51b7dd/info in TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.
2024-12-03T05:02:24,444 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/109be95eed2d4335b879606e3cbd6351, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/d6fb0115da6c4b46abda54480379392b, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/bf3312352cc74e438209571496a71b0d] into tmpdir=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp, totalSize=110.9 K
2024-12-03T05:02:24,445 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting 109be95eed2d4335b879606e3cbd6351, keycount=66, bloomtype=ROW, size=74.9 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1733202087913
2024-12-03T05:02:24,445 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting d6fb0115da6c4b46abda54480379392b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733202142354
2024-12-03T05:02:24,446 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting bf3312352cc74e438209571496a71b0d, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733202144367
2024-12-03T05:02:24,458 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 124beb052e88936506b4d7691f51b7dd#info#compaction#75 average throughput is 31.13 MB/second, slept 0 time(s) and total slept time is 0 ms.
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T05:02:24,459 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/598fa62b12824d5d887b73bdc49ce12f is 1080, key is row0062/info:/1733202087913/Put/seqid=0 2024-12-03T05:02:24,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741862_1038 (size=103705) 2024-12-03T05:02:24,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741862_1038 (size=103705) 2024-12-03T05:02:24,474 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/598fa62b12824d5d887b73bdc49ce12f as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/598fa62b12824d5d887b73bdc49ce12f 2024-12-03T05:02:24,479 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 124beb052e88936506b4d7691f51b7dd/info of 124beb052e88936506b4d7691f51b7dd into 598fa62b12824d5d887b73bdc49ce12f(size=101.3 K), total size for store is 101.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T05:02:24,479 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 124beb052e88936506b4d7691f51b7dd: 2024-12-03T05:02:24,479 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., storeName=124beb052e88936506b4d7691f51b7dd/info, priority=13, startTime=1733202144443; duration=0sec 2024-12-03T05:02:24,480 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T05:02:24,480 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 124beb052e88936506b4d7691f51b7dd:info 2024-12-03T05:02:25,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:25,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:26,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:26,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:26,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] regionserver.HRegion(8855): Flush requested on 124beb052e88936506b4d7691f51b7dd 2024-12-03T05:02:26,437 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 124beb052e88936506b4d7691f51b7dd 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-12-03T05:02:26,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/1c477817965445d284db8e83dcffc27c is 1080, key is row0153/info:/1733202144400/Put/seqid=0 2024-12-03T05:02:26,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741863_1039 (size=14672) 2024-12-03T05:02:26,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741863_1039 (size=14672) 2024-12-03T05:02:26,457 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/1c477817965445d284db8e83dcffc27c 2024-12-03T05:02:26,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/1c477817965445d284db8e83dcffc27c as 
hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/1c477817965445d284db8e83dcffc27c 2024-12-03T05:02:26,468 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/1c477817965445d284db8e83dcffc27c, entries=9, sequenceid=210, filesize=14.3 K 2024-12-03T05:02:26,469 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=18.91 KB/19368 for 124beb052e88936506b4d7691f51b7dd in 32ms, sequenceid=210, compaction requested=false 2024-12-03T05:02:26,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 124beb052e88936506b4d7691f51b7dd: 2024-12-03T05:02:26,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] regionserver.HRegion(8855): Flush requested on 124beb052e88936506b4d7691f51b7dd 2024-12-03T05:02:26,471 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 124beb052e88936506b4d7691f51b7dd 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB 2024-12-03T05:02:26,474 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/6c73417a8bc5487bae432629bc64f90c is 1080, key is row0162/info:/1733202146438/Put/seqid=0 2024-12-03T05:02:26,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741864_1040 (size=25472) 2024-12-03T05:02:26,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741864_1040 (size=25472) 2024-12-03T05:02:26,483 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=19.96 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/6c73417a8bc5487bae432629bc64f90c 2024-12-03T05:02:26,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=124beb052e88936506b4d7691f51b7dd, server=32beccd76b35,36269,1733202063527 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-03T05:02:26,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:41112 deadline: 1733202156487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=124beb052e88936506b4d7691f51b7dd, server=32beccd76b35,36269,1733202063527 2024-12-03T05:02:26,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/6c73417a8bc5487bae432629bc64f90c as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/6c73417a8bc5487bae432629bc64f90c 2024-12-03T05:02:26,488 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., hostname=32beccd76b35,36269,1733202063527, seqNum=101 , the old value is region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., hostname=32beccd76b35,36269,1733202063527, seqNum=101, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=124beb052e88936506b4d7691f51b7dd, server=32beccd76b35,36269,1733202063527 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T05:02:26,488 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., hostname=32beccd76b35,36269,1733202063527, seqNum=101 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=124beb052e88936506b4d7691f51b7dd, server=32beccd76b35,36269,1733202063527 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T05:02:26,488 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., hostname=32beccd76b35,36269,1733202063527, seqNum=101 because the exception is null or not the one we care about 2024-12-03T05:02:26,493 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/6c73417a8bc5487bae432629bc64f90c, entries=19, sequenceid=232, filesize=24.9 K 2024-12-03T05:02:26,494 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~19.96 KB/20444, heapSize ~21.61 KB/22128, currentSize=10.51 KB/10760 for 124beb052e88936506b4d7691f51b7dd in 24ms, sequenceid=232, compaction requested=true 2024-12-03T05:02:26,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 124beb052e88936506b4d7691f51b7dd: 2024-12-03T05:02:26,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 124beb052e88936506b4d7691f51b7dd:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T05:02:26,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T05:02:26,494 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T05:02:26,495 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 143849 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T05:02:26,495 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1541): 124beb052e88936506b4d7691f51b7dd/info is initiating minor compaction (all files) 2024-12-03T05:02:26,495 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 124beb052e88936506b4d7691f51b7dd/info in TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd. 
2024-12-03T05:02:26,495 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/598fa62b12824d5d887b73bdc49ce12f, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/1c477817965445d284db8e83dcffc27c, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/6c73417a8bc5487bae432629bc64f90c] into tmpdir=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp, totalSize=140.5 K 2024-12-03T05:02:26,495 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting 598fa62b12824d5d887b73bdc49ce12f, keycount=91, bloomtype=ROW, size=101.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733202087913 2024-12-03T05:02:26,496 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1c477817965445d284db8e83dcffc27c, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733202144400 2024-12-03T05:02:26,496 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6c73417a8bc5487bae432629bc64f90c, keycount=19, bloomtype=ROW, size=24.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733202146438 2024-12-03T05:02:26,507 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 124beb052e88936506b4d7691f51b7dd#info#compaction#78 average throughput is 40.70 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T05:02:26,508 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/3b90478f2e4d4286b8394f460076e05a is 1080, key is row0062/info:/1733202087913/Put/seqid=0 2024-12-03T05:02:26,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741865_1041 (size=134127) 2024-12-03T05:02:26,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741865_1041 (size=134127) 2024-12-03T05:02:26,515 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/3b90478f2e4d4286b8394f460076e05a as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/3b90478f2e4d4286b8394f460076e05a 2024-12-03T05:02:26,519 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 124beb052e88936506b4d7691f51b7dd/info of 124beb052e88936506b4d7691f51b7dd into 3b90478f2e4d4286b8394f460076e05a(size=131.0 K), total size for store is 131.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T05:02:26,519 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 124beb052e88936506b4d7691f51b7dd: 2024-12-03T05:02:26,519 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., storeName=124beb052e88936506b4d7691f51b7dd/info, priority=13, startTime=1733202146494; duration=0sec 2024-12-03T05:02:26,519 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T05:02:26,519 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 124beb052e88936506b4d7691f51b7dd:info 2024-12-03T05:02:27,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:27,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:28,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:28,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:29,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:29,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:30,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:30,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:31,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:31,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:32,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:32,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:33,364 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T05:02:33,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:33,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:34,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:34,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:35,362 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-12-03T05:02:35,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:35,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:36,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:36,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:36,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] regionserver.HRegion(8855): Flush requested on 124beb052e88936506b4d7691f51b7dd 2024-12-03T05:02:36,521 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 124beb052e88936506b4d7691f51b7dd 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-03T05:02:36,525 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/bf8ccfe215364848a054ca8c5b3b5d27 is 1080, key is row0181/info:/1733202146471/Put/seqid=0 2024-12-03T05:02:36,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741866_1042 (size=16828) 2024-12-03T05:02:36,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741866_1042 (size=16828) 2024-12-03T05:02:36,531 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/bf8ccfe215364848a054ca8c5b3b5d27 2024-12-03T05:02:36,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/bf8ccfe215364848a054ca8c5b3b5d27 as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/bf8ccfe215364848a054ca8c5b3b5d27 2024-12-03T05:02:36,540 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/bf8ccfe215364848a054ca8c5b3b5d27, entries=11, sequenceid=247, filesize=16.4 K 2024-12-03T05:02:36,541 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=1.05 KB/1076 for 124beb052e88936506b4d7691f51b7dd in 21ms, sequenceid=247, compaction requested=false 2024-12-03T05:02:36,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 124beb052e88936506b4d7691f51b7dd: 2024-12-03T05:02:37,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:37,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:38,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:02:38,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:02:38,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] regionserver.HRegion(8855): Flush requested on 124beb052e88936506b4d7691f51b7dd 2024-12-03T05:02:38,532 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 124beb052e88936506b4d7691f51b7dd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-03T05:02:38,536 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/eea6df3d1aa345819ecd4d9dd0661077 is 1080, key is row0192/info:/1733202156522/Put/seqid=0 2024-12-03T05:02:38,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741867_1043 (size=12517) 2024-12-03T05:02:38,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741867_1043 (size=12517) 2024-12-03T05:02:38,546 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=257 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/eea6df3d1aa345819ecd4d9dd0661077 2024-12-03T05:02:38,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/eea6df3d1aa345819ecd4d9dd0661077 as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/eea6df3d1aa345819ecd4d9dd0661077 2024-12-03T05:02:38,557 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/eea6df3d1aa345819ecd4d9dd0661077, entries=7, sequenceid=257, filesize=12.2 K 2024-12-03T05:02:38,558 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 124beb052e88936506b4d7691f51b7dd in 26ms, sequenceid=257, compaction requested=true 2024-12-03T05:02:38,558 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 124beb052e88936506b4d7691f51b7dd: 2024-12-03T05:02:38,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 124beb052e88936506b4d7691f51b7dd:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T05:02:38,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T05:02:38,558 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T05:02:38,559 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 163472 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-12-03T05:02:38,559 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1541): 124beb052e88936506b4d7691f51b7dd/info is initiating minor compaction (all files) 2024-12-03T05:02:38,559 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 124beb052e88936506b4d7691f51b7dd/info in TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd. 2024-12-03T05:02:38,559 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/3b90478f2e4d4286b8394f460076e05a, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/bf8ccfe215364848a054ca8c5b3b5d27, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/eea6df3d1aa345819ecd4d9dd0661077] into tmpdir=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp, totalSize=159.6 K 2024-12-03T05:02:38,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] regionserver.HRegion(8855): Flush requested on 124beb052e88936506b4d7691f51b7dd 2024-12-03T05:02:38,560 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 124beb052e88936506b4d7691f51b7dd 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-12-03T05:02:38,560 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3b90478f2e4d4286b8394f460076e05a, keycount=119, bloomtype=ROW, size=131.0 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733202087913 2024-12-03T05:02:38,560 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting bf8ccfe215364848a054ca8c5b3b5d27, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1733202146471 2024-12-03T05:02:38,561 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting eea6df3d1aa345819ecd4d9dd0661077, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1733202156522 2024-12-03T05:02:38,564 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/f8a0d37d85fe4873bc39965c3cda66e7 is 1080, key is row0199/info:/1733202158533/Put/seqid=0 2024-12-03T05:02:38,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741868_1044 (size=22254) 2024-12-03T05:02:38,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741868_1044 (size=22254) 2024-12-03T05:02:38,572 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=276 (bloomFilter=true), 
to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/f8a0d37d85fe4873bc39965c3cda66e7 2024-12-03T05:02:38,577 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 124beb052e88936506b4d7691f51b7dd#info#compaction#82 average throughput is 70.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T05:02:38,578 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/cad2b8a8ad2946619fcdeaea237fff83 is 1080, key is row0062/info:/1733202087913/Put/seqid=0 2024-12-03T05:02:38,580 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/f8a0d37d85fe4873bc39965c3cda66e7 as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/f8a0d37d85fe4873bc39965c3cda66e7 2024-12-03T05:02:38,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741869_1045 (size=153707) 2024-12-03T05:02:38,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741869_1045 (size=153707) 2024-12-03T05:02:38,591 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/f8a0d37d85fe4873bc39965c3cda66e7, entries=16, sequenceid=276, filesize=21.7 K 2024-12-03T05:02:38,591 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=10.51 KB/10760 for 124beb052e88936506b4d7691f51b7dd in 31ms, sequenceid=276, compaction requested=false 2024-12-03T05:02:38,592 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 124beb052e88936506b4d7691f51b7dd: 2024-12-03T05:02:38,593 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/cad2b8a8ad2946619fcdeaea237fff83 as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/cad2b8a8ad2946619fcdeaea237fff83 2024-12-03T05:02:38,599 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 124beb052e88936506b4d7691f51b7dd/info of 124beb052e88936506b4d7691f51b7dd into cad2b8a8ad2946619fcdeaea237fff83(size=150.1 K), total size for store is 171.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
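The compaction entries above report an average throughput of 70.29 MB/second against a configured limit of 50.00 MB/second ("slept 0 time(s)"). Below is a hypothetical, simplified sketch of a bytes-per-second write throttle in that spirit; it is not the HBase PressureAwareThroughputController, and the class and method names are illustrative assumptions only.

// Hypothetical sketch: sleep just long enough to keep the average write rate under a limit.
import java.util.concurrent.TimeUnit;

public final class SimpleWriteThrottle {
    private final double maxBytesPerSecond;   // e.g. 50 MB/s, as in the log limit above
    private final long windowStartNanos;
    private long bytesInWindow;

    public SimpleWriteThrottle(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
        this.windowStartNanos = System.nanoTime();
    }

    /** Record a write and pause if the observed rate exceeds the configured limit. */
    public synchronized void throttle(long bytesWritten) throws InterruptedException {
        bytesInWindow += bytesWritten;
        double elapsedSeconds = (System.nanoTime() - windowStartNanos) / 1e9;
        double minSeconds = bytesInWindow / maxBytesPerSecond; // time the bytes should have taken
        if (minSeconds > elapsedSeconds) {
            TimeUnit.MILLISECONDS.sleep((long) ((minSeconds - elapsedSeconds) * 1000));
        }
    }

    public static void main(String[] args) throws InterruptedException {
        SimpleWriteThrottle throttle = new SimpleWriteThrottle(50 * 1024 * 1024); // 50 MB/s
        // Simulate writing ~160 KB, roughly the compaction input size reported above.
        for (int i = 0; i < 10; i++) {
            throttle.throttle(16 * 1024);
        }
        System.out.println("done");
    }
}

With an input this small the sketch never sleeps, which matches the "slept 0 time(s) and total slept time is 0 ms" reading of the log entry above.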
2024-12-03T05:02:38,599 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 124beb052e88936506b4d7691f51b7dd: 2024-12-03T05:02:38,599 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., storeName=124beb052e88936506b4d7691f51b7dd/info, priority=13, startTime=1733202158558; duration=0sec 2024-12-03T05:02:38,599 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T05:02:38,599 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 124beb052e88936506b4d7691f51b7dd:info 2024-12-03T05:02:39,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:02:39,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:39,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 after 196195ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T05:02:39,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta after 196168ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor204.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
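The "Failed invocation" and "attempt=3 ... after 196195ms" warnings above show a close-WAL-writer thread repeatedly re-probing lease recovery roughly once per second, each probe failing because the DFSClient has already been shut down ("java.io.IOException: Filesystem closed"). Below is a hypothetical sketch of that bounded retry-until-deadline pattern; it is not the actual RecoverLeaseFSUtils code, and the RecoveryAttempt interface, timings, and messages are illustrative assumptions only.

// Hypothetical sketch: retry a recovery probe until success, deadline, or interrupt.
import java.util.concurrent.TimeUnit;

public final class BoundedRecoveryRetry {

    /** One recovery probe; returns true when the resource is confirmed recovered. */
    interface RecoveryAttempt {
        boolean tryOnce() throws Exception;
    }

    /**
     * Retry until success, until the deadline passes, or until interrupted.
     * Each failure is logged and retried after a fixed pause, mirroring the
     * once-per-second warnings in the log above.
     */
    static boolean recoverWithRetries(RecoveryAttempt attempt, long deadlineMillis, long pauseMillis) {
        long start = System.currentTimeMillis();
        int attemptNo = 0;
        while (System.currentTimeMillis() - start < deadlineMillis) {
            attemptNo++;
            try {
                if (attempt.tryOnce()) {
                    return true;
                }
            } catch (Exception e) {
                System.err.println("attempt=" + attemptNo + " failed after "
                        + (System.currentTimeMillis() - start) + "ms: " + e);
            }
            try {
                TimeUnit.MILLISECONDS.sleep(pauseMillis);
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
                return false;
            }
        }
        return false; // deadline exhausted; the caller decides whether to give up or escalate
    }

    public static void main(String[] args) {
        // A probe that always fails, e.g. because the underlying client is already closed,
        // analogous to the "Filesystem closed" cause in the traces above.
        boolean ok = recoverWithRetries(() -> {
            throw new java.io.IOException("Filesystem closed");
        }, 5_000, 1_000);
        System.out.println("recovered=" + ok);
    }
}

A probe against an already-closed client can never succeed, so the loop simply runs out its deadline; that is why the same pair of WAL paths keeps reappearing in the warnings below.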
2024-12-03T05:02:40,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:40,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:40,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] regionserver.HRegion(8855): Flush requested on 124beb052e88936506b4d7691f51b7dd 2024-12-03T05:02:40,589 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 124beb052e88936506b4d7691f51b7dd 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-03T05:02:40,593 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/37a2ffc6c2144c068c331392a5f08b0d is 1080, key is row0215/info:/1733202158561/Put/seqid=0 2024-12-03T05:02:40,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741870_1046 (size=16839) 2024-12-03T05:02:40,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741870_1046 (size=16839) 2024-12-03T05:02:40,607 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/37a2ffc6c2144c068c331392a5f08b0d 2024-12-03T05:02:40,616 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/37a2ffc6c2144c068c331392a5f08b0d as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/37a2ffc6c2144c068c331392a5f08b0d 2024-12-03T05:02:40,621 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/37a2ffc6c2144c068c331392a5f08b0d, entries=11, sequenceid=291, filesize=16.4 K 2024-12-03T05:02:40,623 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, 
currentSize=17.86 KB/18292 for 124beb052e88936506b4d7691f51b7dd in 34ms, sequenceid=291, compaction requested=true 2024-12-03T05:02:40,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 124beb052e88936506b4d7691f51b7dd: 2024-12-03T05:02:40,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 124beb052e88936506b4d7691f51b7dd:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T05:02:40,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T05:02:40,623 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T05:02:40,624 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 192800 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T05:02:40,624 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1541): 124beb052e88936506b4d7691f51b7dd/info is initiating minor compaction (all files) 2024-12-03T05:02:40,624 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 124beb052e88936506b4d7691f51b7dd/info in TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd. 2024-12-03T05:02:40,624 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/cad2b8a8ad2946619fcdeaea237fff83, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/f8a0d37d85fe4873bc39965c3cda66e7, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/37a2ffc6c2144c068c331392a5f08b0d] into tmpdir=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp, totalSize=188.3 K 2024-12-03T05:02:40,624 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting cad2b8a8ad2946619fcdeaea237fff83, keycount=137, bloomtype=ROW, size=150.1 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1733202087913 2024-12-03T05:02:40,625 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting f8a0d37d85fe4873bc39965c3cda66e7, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733202158533 2024-12-03T05:02:40,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] regionserver.HRegion(8855): Flush requested on 124beb052e88936506b4d7691f51b7dd 2024-12-03T05:02:40,626 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 124beb052e88936506b4d7691f51b7dd 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-12-03T05:02:40,629 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting 37a2ffc6c2144c068c331392a5f08b0d, keycount=11, 
bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733202158561 2024-12-03T05:02:40,634 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/0504eceb52e547eab2ca133565fb22d7 is 1080, key is row0226/info:/1733202160590/Put/seqid=0 2024-12-03T05:02:40,659 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 124beb052e88936506b4d7691f51b7dd#info#compaction#85 average throughput is 42.07 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T05:02:40,660 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/2c470d44f77845ee931b4920cb40f2ff is 1080, key is row0062/info:/1733202087913/Put/seqid=0 2024-12-03T05:02:40,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=124beb052e88936506b4d7691f51b7dd, server=32beccd76b35,36269,1733202063527 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-03T05:02:40,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:41112 deadline: 1733202170661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=124beb052e88936506b4d7691f51b7dd, server=32beccd76b35,36269,1733202063527 2024-12-03T05:02:40,662 DEBUG [Time-limited test {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., hostname=32beccd76b35,36269,1733202063527, seqNum=101 , the old value is region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., hostname=32beccd76b35,36269,1733202063527, seqNum=101, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=124beb052e88936506b4d7691f51b7dd, server=32beccd76b35,36269,1733202063527 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T05:02:40,662 DEBUG [Time-limited test {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., hostname=32beccd76b35,36269,1733202063527, seqNum=101 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=124beb052e88936506b4d7691f51b7dd, server=32beccd76b35,36269,1733202063527 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-03T05:02:40,662 DEBUG [Time-limited test {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., hostname=32beccd76b35,36269,1733202063527, seqNum=101 because the exception is null or not the one we care about 2024-12-03T05:02:40,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741871_1047 (size=24412) 2024-12-03T05:02:40,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741871_1047 (size=24412) 2024-12-03T05:02:40,668 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/0504eceb52e547eab2ca133565fb22d7 2024-12-03T05:02:40,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741872_1048 (size=182950) 2024-12-03T05:02:40,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741872_1048 (size=182950) 2024-12-03T05:02:40,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/0504eceb52e547eab2ca133565fb22d7 as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/0504eceb52e547eab2ca133565fb22d7 2024-12-03T05:02:40,695 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/0504eceb52e547eab2ca133565fb22d7, entries=18, sequenceid=312, filesize=23.8 K 2024-12-03T05:02:40,696 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=11.56 KB/11836 for 124beb052e88936506b4d7691f51b7dd in 70ms, sequenceid=312, compaction requested=false 2024-12-03T05:02:40,696 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 124beb052e88936506b4d7691f51b7dd: 2024-12-03T05:02:41,091 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/2c470d44f77845ee931b4920cb40f2ff as 
hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/2c470d44f77845ee931b4920cb40f2ff 2024-12-03T05:02:41,098 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 124beb052e88936506b4d7691f51b7dd/info of 124beb052e88936506b4d7691f51b7dd into 2c470d44f77845ee931b4920cb40f2ff(size=178.7 K), total size for store is 202.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T05:02:41,098 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 124beb052e88936506b4d7691f51b7dd: 2024-12-03T05:02:41,098 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., storeName=124beb052e88936506b4d7691f51b7dd/info, priority=13, startTime=1733202160623; duration=0sec 2024-12-03T05:02:41,099 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T05:02:41,099 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 124beb052e88936506b4d7691f51b7dd:info 2024-12-03T05:02:41,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:41,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:42,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:42,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:43,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:43,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:44,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:44,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:45,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:45,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:46,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:46,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:47,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:47,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:48,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:48,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:49,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:49,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:50,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:50,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:02:50,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36269 {}] regionserver.HRegion(8855): Flush requested on 124beb052e88936506b4d7691f51b7dd 2024-12-03T05:02:50,731 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 124beb052e88936506b4d7691f51b7dd 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-03T05:02:50,735 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/895a68811aa64a1b830307461757cc01 is 1080, key is row0244/info:/1733202160627/Put/seqid=0 2024-12-03T05:02:50,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741873_1049 (size=17918) 2024-12-03T05:02:50,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741873_1049 (size=17918) 2024-12-03T05:02:50,740 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/895a68811aa64a1b830307461757cc01 2024-12-03T05:02:50,745 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/895a68811aa64a1b830307461757cc01 as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/895a68811aa64a1b830307461757cc01 2024-12-03T05:02:50,750 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/895a68811aa64a1b830307461757cc01, entries=12, sequenceid=328, filesize=17.5 K 2024-12-03T05:02:50,751 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=1.05 KB/1076 for 124beb052e88936506b4d7691f51b7dd in 20ms, sequenceid=328, compaction requested=true 2024-12-03T05:02:50,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 124beb052e88936506b4d7691f51b7dd: 2024-12-03T05:02:50,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 124beb052e88936506b4d7691f51b7dd:info, priority=-2147483648, current under compaction store size is 1 2024-12-03T05:02:50,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T05:02:50,751 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T05:02:50,752 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 225280 starting at candidate #0 after 
considering 1 permutations with 1 in ratio 2024-12-03T05:02:50,752 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1541): 124beb052e88936506b4d7691f51b7dd/info is initiating minor compaction (all files) 2024-12-03T05:02:50,752 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 124beb052e88936506b4d7691f51b7dd/info in TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd. 2024-12-03T05:02:50,752 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/2c470d44f77845ee931b4920cb40f2ff, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/0504eceb52e547eab2ca133565fb22d7, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/895a68811aa64a1b830307461757cc01] into tmpdir=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp, totalSize=220 K 2024-12-03T05:02:50,752 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2c470d44f77845ee931b4920cb40f2ff, keycount=164, bloomtype=ROW, size=178.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733202087913 2024-12-03T05:02:50,753 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0504eceb52e547eab2ca133565fb22d7, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733202160590 2024-12-03T05:02:50,753 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] compactions.Compactor(225): Compacting 895a68811aa64a1b830307461757cc01, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1733202160627 2024-12-03T05:02:50,764 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 124beb052e88936506b4d7691f51b7dd#info#compaction#87 average throughput is 99.54 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T05:02:50,765 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/581fdb3b94a7486b8a219c08ec9784af is 1080, key is row0062/info:/1733202087913/Put/seqid=0 2024-12-03T05:02:50,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741874_1050 (size=215499) 2024-12-03T05:02:50,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741874_1050 (size=215499) 2024-12-03T05:02:50,773 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/581fdb3b94a7486b8a219c08ec9784af as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/581fdb3b94a7486b8a219c08ec9784af 2024-12-03T05:02:50,778 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 124beb052e88936506b4d7691f51b7dd/info of 124beb052e88936506b4d7691f51b7dd into 581fdb3b94a7486b8a219c08ec9784af(size=210.4 K), total size for store is 210.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T05:02:50,778 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 124beb052e88936506b4d7691f51b7dd: 2024-12-03T05:02:50,778 INFO [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd., storeName=124beb052e88936506b4d7691f51b7dd/info, priority=13, startTime=1733202170751; duration=0sec 2024-12-03T05:02:50,778 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T05:02:50,778 DEBUG [RS:0;32beccd76b35:36269-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 124beb052e88936506b4d7691f51b7dd:info 2024-12-03T05:02:51,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:51,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:52,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:52,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:52,733 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-12-03T05:02:52,734 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C36269%2C1733202063527.1733202172733 2024-12-03T05:02:52,758 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:52,758 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:52,758 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:52,758 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:52,758 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:52,759 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/WALs/32beccd76b35,36269,1733202063527/32beccd76b35%2C36269%2C1733202063527.1733202064783 with entries=311, filesize=307.98 KB; new WAL /user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/WALs/32beccd76b35,36269,1733202063527/32beccd76b35%2C36269%2C1733202063527.1733202172733 2024-12-03T05:02:52,762 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34719:34719),(127.0.0.1/127.0.0.1:42049:42049)] 2024-12-03T05:02:52,762 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/WALs/32beccd76b35,36269,1733202063527/32beccd76b35%2C36269%2C1733202063527.1733202064783 is not closed yet, will try archiving it next time 2024-12-03T05:02:52,765 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for d18ac3340fdcc16022f867385a7942da: 2024-12-03T05:02:52,765 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 
1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-12-03T05:02:52,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741833_1009 (size=315382) 2024-12-03T05:02:52,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741833_1009 (size=315382) 2024-12-03T05:02:52,771 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740/.tmp/info/5fb5be5cea874a4e8758d37e3b415632 is 186, key is TestLogRolling-testLogRolling,,1733202089989.d18ac3340fdcc16022f867385a7942da./info:regioninfo/1733202090846/Put/seqid=0 2024-12-03T05:02:52,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741876_1052 (size=6153) 2024-12-03T05:02:52,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741876_1052 (size=6153) 2024-12-03T05:02:52,779 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740/.tmp/info/5fb5be5cea874a4e8758d37e3b415632 2024-12-03T05:02:52,785 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740/.tmp/info/5fb5be5cea874a4e8758d37e3b415632 as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740/info/5fb5be5cea874a4e8758d37e3b415632 2024-12-03T05:02:52,790 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740/info/5fb5be5cea874a4e8758d37e3b415632, entries=5, sequenceid=21, filesize=6.0 K 2024-12-03T05:02:52,791 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 26ms, sequenceid=21, compaction requested=false 2024-12-03T05:02:52,791 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-03T05:02:52,791 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 124beb052e88936506b4d7691f51b7dd 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-03T05:02:52,798 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/b31c602dd4d54f6d8a23f7f177dca62f is 1080, key is row0256/info:/1733202170732/Put/seqid=0 2024-12-03T05:02:52,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741877_1053 (size=6035) 2024-12-03T05:02:52,803 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=333 (bloomFilter=true), 
to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/b31c602dd4d54f6d8a23f7f177dca62f 2024-12-03T05:02:52,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741877_1053 (size=6035) 2024-12-03T05:02:52,807 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/.tmp/info/b31c602dd4d54f6d8a23f7f177dca62f as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/b31c602dd4d54f6d8a23f7f177dca62f 2024-12-03T05:02:52,812 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/b31c602dd4d54f6d8a23f7f177dca62f, entries=1, sequenceid=333, filesize=5.9 K 2024-12-03T05:02:52,813 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 124beb052e88936506b4d7691f51b7dd in 22ms, sequenceid=333, compaction requested=false 2024-12-03T05:02:52,813 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 124beb052e88936506b4d7691f51b7dd: 2024-12-03T05:02:52,813 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C36269%2C1733202063527.1733202172813 2024-12-03T05:02:52,817 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:52,817 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:52,817 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:52,817 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:52,817 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:52,817 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/WALs/32beccd76b35,36269,1733202063527/32beccd76b35%2C36269%2C1733202063527.1733202172733 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/WALs/32beccd76b35,36269,1733202063527/32beccd76b35%2C36269%2C1733202063527.1733202172813 2024-12-03T05:02:52,818 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34719:34719),(127.0.0.1/127.0.0.1:42049:42049)] 2024-12-03T05:02:52,818 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/WALs/32beccd76b35,36269,1733202063527/32beccd76b35%2C36269%2C1733202063527.1733202172733 is not closed yet, will try archiving it next time 2024-12-03T05:02:52,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741875_1051 (size=731) 2024-12-03T05:02:52,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741875_1051 (size=731) 2024-12-03T05:02:52,820 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/WALs/32beccd76b35,36269,1733202063527/32beccd76b35%2C36269%2C1733202063527.1733202064783 to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/oldWALs/32beccd76b35%2C36269%2C1733202063527.1733202064783 2024-12-03T05:02:52,820 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-12-03T05:02:52,820 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/WALs/32beccd76b35,36269,1733202063527/32beccd76b35%2C36269%2C1733202063527.1733202172733 to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/oldWALs/32beccd76b35%2C36269%2C1733202063527.1733202172733 2024-12-03T05:02:52,920 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T05:02:52,920 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T05:02:52,921 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) 
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T05:02:52,921 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T05:02:52,921 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T05:02:52,921 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T05:02:52,921 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=641270844, stopped=false 2024-12-03T05:02:52,921 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=32beccd76b35,37267,1733202063393 2024-12-03T05:02:52,921 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T05:02:52,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36269-0x1015177a4ae0001, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T05:02:52,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T05:02:52,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36269-0x1015177a4ae0001, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:02:52,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:02:52,924 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T05:02:52,924 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
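Annotation (not part of the original log): the flush-and-roll sequence recorded above (memstore flushed to a .tmp HFile, committed into the store directory, then the WAL rolled and the previous files archived to oldWALs) is driven by the test thread. Purely as an illustrative sketch, and not the test's actual code, the same sequence can be requested through the public Admin API; the configuration setup and the literal table/server names below are copied from the log and are otherwise assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAndRollSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Flush the test table's memstores, as the log shows happening before the roll.
          admin.flush(TableName.valueOf("TestLogRolling-testLogRolling"));
          // Roll the WAL of the single region server; server names use the host,port,startcode form.
          ServerName rs = ServerName.valueOf("32beccd76b35,36269,1733202063527");  // taken from the log above
          admin.rollWALWriter(rs);
        }
      }
    }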
2024-12-03T05:02:52,924 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T05:02:52,924 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T05:02:52,924 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '32beccd76b35,36269,1733202063527' ***** 2024-12-03T05:02:52,924 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T05:02:52,925 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T05:02:52,925 INFO [RS:0;32beccd76b35:36269 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T05:02:52,925 INFO [RS:0;32beccd76b35:36269 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T05:02:52,925 INFO [RS:0;32beccd76b35:36269 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T05:02:52,925 INFO [RS:0;32beccd76b35:36269 {}] regionserver.HRegionServer(3091): Received CLOSE for d18ac3340fdcc16022f867385a7942da 2024-12-03T05:02:52,925 INFO [RS:0;32beccd76b35:36269 {}] regionserver.HRegionServer(3091): Received CLOSE for 124beb052e88936506b4d7691f51b7dd 2024-12-03T05:02:52,925 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T05:02:52,925 INFO [RS:0;32beccd76b35:36269 {}] regionserver.HRegionServer(959): stopping server 32beccd76b35,36269,1733202063527 2024-12-03T05:02:52,925 INFO [RS:0;32beccd76b35:36269 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T05:02:52,925 INFO [RS:0;32beccd76b35:36269 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;32beccd76b35:36269. 2024-12-03T05:02:52,925 DEBUG [RS:0;32beccd76b35:36269 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T05:02:52,926 DEBUG [RS:0;32beccd76b35:36269 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T05:02:52,926 INFO [RS:0;32beccd76b35:36269 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T05:02:52,926 INFO [RS:0;32beccd76b35:36269 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
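Annotation (not part of the original log): the call stacks above show HBaseTestingUtil.shutdownMiniCluster() being reached from AbstractTestLogRolling.tearDown() through JUnit's RunAfters statement. A minimal sketch of what such a tearDown typically looks like; the TEST_UTIL field name and the @After wiring are assumptions rather than the actual test source.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class LogRollingTearDownSketch {
      // Shared mini-cluster helper; the field name is an assumption.
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Closes the shared async connection and stops the master and region server,
        // which produces the "Shutting down minicluster" / "Connection has been closed" lines above.
        TEST_UTIL.shutdownMiniCluster();
      }
    }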
2024-12-03T05:02:52,926 INFO [RS:0;32beccd76b35:36269 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T05:02:52,926 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d18ac3340fdcc16022f867385a7942da, disabling compactions & flushes 2024-12-03T05:02:52,926 INFO [RS:0;32beccd76b35:36269 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T05:02:52,926 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733202089989.d18ac3340fdcc16022f867385a7942da. 2024-12-03T05:02:52,926 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733202089989.d18ac3340fdcc16022f867385a7942da. 2024-12-03T05:02:52,926 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733202089989.d18ac3340fdcc16022f867385a7942da. after waiting 0 ms 2024-12-03T05:02:52,926 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733202089989.d18ac3340fdcc16022f867385a7942da. 2024-12-03T05:02:52,928 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36269-0x1015177a4ae0001, quorum=127.0.0.1:52145, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T05:02:52,929 INFO [RS:0;32beccd76b35:36269 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-12-03T05:02:52,929 DEBUG [RS:0;32beccd76b35:36269 {}] regionserver.HRegionServer(1325): Online Regions={d18ac3340fdcc16022f867385a7942da=TestLogRolling-testLogRolling,,1733202089989.d18ac3340fdcc16022f867385a7942da., 1588230740=hbase:meta,,1.1588230740, 124beb052e88936506b4d7691f51b7dd=TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.} 2024-12-03T05:02:52,929 DEBUG [RS:0;32beccd76b35:36269 {}] regionserver.HRegionServer(1351): Waiting on 124beb052e88936506b4d7691f51b7dd, 1588230740, d18ac3340fdcc16022f867385a7942da 2024-12-03T05:02:52,929 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T05:02:52,930 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T05:02:52,930 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T05:02:52,930 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T05:02:52,930 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T05:02:52,930 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733202089989.d18ac3340fdcc16022f867385a7942da.-1 {}] regionserver.HStore(2317): Moving the files 
[hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/d18ac3340fdcc16022f867385a7942da/info/1c5b65c0b85d45fda44c90652b16e4ee.c411983ff3bea3ce63a0c6f49ca7b025->hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/1c5b65c0b85d45fda44c90652b16e4ee-bottom] to archive 2024-12-03T05:02:52,931 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733202089989.d18ac3340fdcc16022f867385a7942da.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-03T05:02:52,939 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733202089989.d18ac3340fdcc16022f867385a7942da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/d18ac3340fdcc16022f867385a7942da/info/1c5b65c0b85d45fda44c90652b16e4ee.c411983ff3bea3ce63a0c6f49ca7b025 to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/d18ac3340fdcc16022f867385a7942da/info/1c5b65c0b85d45fda44c90652b16e4ee.c411983ff3bea3ce63a0c6f49ca7b025 2024-12-03T05:02:52,940 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733202089989.d18ac3340fdcc16022f867385a7942da.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=32beccd76b35:37267 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-12-03T05:02:52,940 WARN [StoreCloser-TestLogRolling-testLogRolling,,1733202089989.d18ac3340fdcc16022f867385a7942da.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-12-03T05:02:52,950 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-12-03T05:02:52,950 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T05:02:52,951 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T05:02:52,951 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733202172929Running coprocessor pre-close hooks at 1733202172929Disabling compacts and flushes for region at 1733202172929Disabling writes for close at 1733202172930 (+1 ms)Writing region close event to WAL at 1733202172933 (+3 ms)Running coprocessor post-close hooks at 1733202172950 (+17 ms)Closed at 1733202172951 (+1 ms) 2024-12-03T05:02:52,951 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T05:02:52,955 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/d18ac3340fdcc16022f867385a7942da/recovered.edits/105.seqid, newMaxSeqId=105, maxSeqId=100 2024-12-03T05:02:52,955 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733202089989.d18ac3340fdcc16022f867385a7942da. 2024-12-03T05:02:52,955 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d18ac3340fdcc16022f867385a7942da: Waiting for close lock at 1733202172926Running coprocessor pre-close hooks at 1733202172926Disabling compacts and flushes for region at 1733202172926Disabling writes for close at 1733202172926Writing region close event to WAL at 1733202172945 (+19 ms)Running coprocessor post-close hooks at 1733202172955 (+10 ms)Closed at 1733202172955 2024-12-03T05:02:52,955 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733202089989.d18ac3340fdcc16022f867385a7942da. 2024-12-03T05:02:52,956 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 124beb052e88936506b4d7691f51b7dd, disabling compactions & flushes 2024-12-03T05:02:52,956 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd. 
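Annotation (not part of the original log): the "Region close journal" entries above encode each close phase with its wall-clock timestamp and a (+N ms) delta since the previous phase. As a log-analysis aid only, with the regex and class name being my own assumptions about the format, one way to pull the phases out of such a journal string:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class CloseJournalParserSketch {
      // Matches "<phase> at <epoch-millis>" optionally followed by " (+<delta> ms)".
      private static final Pattern PHASE =
          Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");

      public static void main(String[] args) {
        // Journal text copied from the 1588230740 close journal in the log above (assumed format).
        String journal = "Waiting for close lock at 1733202172929"
            + "Running coprocessor pre-close hooks at 1733202172929"
            + "Disabling compacts and flushes for region at 1733202172929"
            + "Disabling writes for close at 1733202172930 (+1 ms)"
            + "Writing region close event to WAL at 1733202172933 (+3 ms)"
            + "Running coprocessor post-close hooks at 1733202172950 (+17 ms)"
            + "Closed at 1733202172951 (+1 ms)";
        Matcher m = PHASE.matcher(journal);
        while (m.find()) {
          String delta = m.group(3) == null ? "0" : m.group(3);
          System.out.printf("%-45s t=%s  +%s ms%n", m.group(1).trim(), m.group(2), delta);
        }
      }
    }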
2024-12-03T05:02:52,956 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd. 2024-12-03T05:02:52,956 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd. after waiting 0 ms 2024-12-03T05:02:52,956 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd. 2024-12-03T05:02:52,956 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/1c5b65c0b85d45fda44c90652b16e4ee.c411983ff3bea3ce63a0c6f49ca7b025->hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/c411983ff3bea3ce63a0c6f49ca7b025/info/1c5b65c0b85d45fda44c90652b16e4ee-top, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/3ee9ca291eaf402b865b12fa6512b415, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/TestLogRolling-testLogRolling=c411983ff3bea3ce63a0c6f49ca7b025-8bb8a556f2bd479db428aa48b6543d77, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/9d28839a928045d68c094fb0a51d23e8, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/c6ee57c0efa64336930d78561c2c7dab, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/f1f71801ebe24231b2cc9d1dbd46e4da, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/e43d5487a5c7493e8c1d2155d12d9a7c, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/109be95eed2d4335b879606e3cbd6351, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/3fe3677a53ec47248aab85102c0d742a, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/d6fb0115da6c4b46abda54480379392b, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/598fa62b12824d5d887b73bdc49ce12f, 
hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/bf3312352cc74e438209571496a71b0d, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/1c477817965445d284db8e83dcffc27c, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/3b90478f2e4d4286b8394f460076e05a, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/6c73417a8bc5487bae432629bc64f90c, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/bf8ccfe215364848a054ca8c5b3b5d27, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/cad2b8a8ad2946619fcdeaea237fff83, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/eea6df3d1aa345819ecd4d9dd0661077, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/f8a0d37d85fe4873bc39965c3cda66e7, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/2c470d44f77845ee931b4920cb40f2ff, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/37a2ffc6c2144c068c331392a5f08b0d, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/0504eceb52e547eab2ca133565fb22d7, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/895a68811aa64a1b830307461757cc01] to archive 2024-12-03T05:02:52,957 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
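Annotation (not part of the original log): the store files listed above are moved from the region's data directory to a mirrored location under archive/, i.e. data/<namespace>/<table>/<region>/<cf>/<file> becomes archive/data/<namespace>/<table>/<region>/<cf>/<file> under the same test-data root. A small illustrative helper that computes the archive location for one of the paths shown; this is an assumed layout rule derived from the log, not HBase's own HFileArchiver code.

    import org.apache.hadoop.fs.Path;

    public class ArchivePathSketch {
      // Maps <root>/data/<ns>/<table>/<region>/<cf>/<file> to <root>/archive/data/...,
      // mirroring the moves reported by HFileArchiver in the log above.
      static Path toArchivePath(Path rootDir, Path storeFile) {
        String relative = storeFile.toUri().getPath()
            .substring(rootDir.toUri().getPath().length() + 1);  // e.g. data/default/<table>/<region>/info/<file>
        return new Path(new Path(rootDir, "archive"), relative);
      }

      public static void main(String[] args) {
        Path root = new Path(
            "hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf");
        Path storeFile = new Path(root,
            "data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/3ee9ca291eaf402b865b12fa6512b415");
        System.out.println(toArchivePath(root, storeFile));
      }
    }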
2024-12-03T05:02:52,959 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/1c5b65c0b85d45fda44c90652b16e4ee.c411983ff3bea3ce63a0c6f49ca7b025 to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/1c5b65c0b85d45fda44c90652b16e4ee.c411983ff3bea3ce63a0c6f49ca7b025 2024-12-03T05:02:52,962 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/3ee9ca291eaf402b865b12fa6512b415 to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/3ee9ca291eaf402b865b12fa6512b415 2024-12-03T05:02:52,963 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/TestLogRolling-testLogRolling=c411983ff3bea3ce63a0c6f49ca7b025-8bb8a556f2bd479db428aa48b6543d77 to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/TestLogRolling-testLogRolling=c411983ff3bea3ce63a0c6f49ca7b025-8bb8a556f2bd479db428aa48b6543d77 2024-12-03T05:02:52,965 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/9d28839a928045d68c094fb0a51d23e8 to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/9d28839a928045d68c094fb0a51d23e8 2024-12-03T05:02:52,970 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/c6ee57c0efa64336930d78561c2c7dab to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/c6ee57c0efa64336930d78561c2c7dab 2024-12-03T05:02:52,977 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/f1f71801ebe24231b2cc9d1dbd46e4da to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/f1f71801ebe24231b2cc9d1dbd46e4da 2024-12-03T05:02:52,979 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/e43d5487a5c7493e8c1d2155d12d9a7c to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/e43d5487a5c7493e8c1d2155d12d9a7c 2024-12-03T05:02:52,982 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/109be95eed2d4335b879606e3cbd6351 to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/109be95eed2d4335b879606e3cbd6351 2024-12-03T05:02:52,987 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/3fe3677a53ec47248aab85102c0d742a to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/3fe3677a53ec47248aab85102c0d742a 2024-12-03T05:02:52,991 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/d6fb0115da6c4b46abda54480379392b to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/d6fb0115da6c4b46abda54480379392b 2024-12-03T05:02:52,993 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/598fa62b12824d5d887b73bdc49ce12f to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/598fa62b12824d5d887b73bdc49ce12f 2024-12-03T05:02:52,996 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/bf3312352cc74e438209571496a71b0d to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/bf3312352cc74e438209571496a71b0d 2024-12-03T05:02:52,998 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/1c477817965445d284db8e83dcffc27c to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/1c477817965445d284db8e83dcffc27c 2024-12-03T05:02:52,999 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/3b90478f2e4d4286b8394f460076e05a to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/3b90478f2e4d4286b8394f460076e05a 2024-12-03T05:02:53,000 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/6c73417a8bc5487bae432629bc64f90c to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/6c73417a8bc5487bae432629bc64f90c 2024-12-03T05:02:53,002 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/bf8ccfe215364848a054ca8c5b3b5d27 to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/bf8ccfe215364848a054ca8c5b3b5d27 2024-12-03T05:02:53,003 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/cad2b8a8ad2946619fcdeaea237fff83 to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/cad2b8a8ad2946619fcdeaea237fff83 2024-12-03T05:02:53,004 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/eea6df3d1aa345819ecd4d9dd0661077 to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/eea6df3d1aa345819ecd4d9dd0661077 2024-12-03T05:02:53,005 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/f8a0d37d85fe4873bc39965c3cda66e7 to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/f8a0d37d85fe4873bc39965c3cda66e7 2024-12-03T05:02:53,007 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/2c470d44f77845ee931b4920cb40f2ff to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/2c470d44f77845ee931b4920cb40f2ff 2024-12-03T05:02:53,008 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/37a2ffc6c2144c068c331392a5f08b0d to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/37a2ffc6c2144c068c331392a5f08b0d 2024-12-03T05:02:53,009 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/0504eceb52e547eab2ca133565fb22d7 to hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/0504eceb52e547eab2ca133565fb22d7 2024-12-03T05:02:53,011 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/895a68811aa64a1b830307461757cc01 to 
hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/archive/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/info/895a68811aa64a1b830307461757cc01 2024-12-03T05:02:53,011 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [3ee9ca291eaf402b865b12fa6512b415=21307, 9d28839a928045d68c094fb0a51d23e8=12509, c6ee57c0efa64336930d78561c2c7dab=44066, f1f71801ebe24231b2cc9d1dbd46e4da=20064, e43d5487a5c7493e8c1d2155d12d9a7c=22238, 109be95eed2d4335b879606e3cbd6351=76649, 3fe3677a53ec47248aab85102c0d742a=20078, d6fb0115da6c4b46abda54480379392b=12516, 598fa62b12824d5d887b73bdc49ce12f=103705, bf3312352cc74e438209571496a71b0d=24394, 1c477817965445d284db8e83dcffc27c=14672, 3b90478f2e4d4286b8394f460076e05a=134127, 6c73417a8bc5487bae432629bc64f90c=25472, bf8ccfe215364848a054ca8c5b3b5d27=16828, cad2b8a8ad2946619fcdeaea237fff83=153707, eea6df3d1aa345819ecd4d9dd0661077=12517, f8a0d37d85fe4873bc39965c3cda66e7=22254, 2c470d44f77845ee931b4920cb40f2ff=182950, 37a2ffc6c2144c068c331392a5f08b0d=16839, 0504eceb52e547eab2ca133565fb22d7=24412, 895a68811aa64a1b830307461757cc01=17918] 2024-12-03T05:02:53,015 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/data/default/TestLogRolling-testLogRolling/124beb052e88936506b4d7691f51b7dd/recovered.edits/336.seqid, newMaxSeqId=336, maxSeqId=100 2024-12-03T05:02:53,016 INFO [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd. 2024-12-03T05:02:53,016 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 124beb052e88936506b4d7691f51b7dd: Waiting for close lock at 1733202172956Running coprocessor pre-close hooks at 1733202172956Disabling compacts and flushes for region at 1733202172956Disabling writes for close at 1733202172956Writing region close event to WAL at 1733202173011 (+55 ms)Running coprocessor post-close hooks at 1733202173016 (+5 ms)Closed at 1733202173016 2024-12-03T05:02:53,016 DEBUG [RS_CLOSE_REGION-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733202089989.124beb052e88936506b4d7691f51b7dd. 2024-12-03T05:02:53,129 INFO [RS:0;32beccd76b35:36269 {}] regionserver.HRegionServer(976): stopping server 32beccd76b35,36269,1733202063527; all regions closed. 
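Annotation (not part of the original log): the warning above lists each archived store file with its size in bytes; the report to the master fails only because the RPC client has already been stopped during shutdown, as the earlier StoppedRpcClientException stack shows. As a log-analysis sketch only, with the name=size format being my own reading of the message, the pairs can be counted and totalled like this:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class ArchivalReportSketch {
      public static void main(String[] args) {
        // Excerpt of the bracketed list from the warning above (truncated here for brevity).
        String report = "[3ee9ca291eaf402b865b12fa6512b415=21307, 9d28839a928045d68c094fb0a51d23e8=12509, "
            + "c6ee57c0efa64336930d78561c2c7dab=44066, 598fa62b12824d5d887b73bdc49ce12f=103705]";
        Matcher m = Pattern.compile("([0-9a-f]{32})=(\\d+)").matcher(report);
        long files = 0, bytes = 0;
        while (m.find()) {
          files++;
          bytes += Long.parseLong(m.group(2));
        }
        System.out.println(files + " files, " + bytes + " bytes reported as archived");
      }
    }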
2024-12-03T05:02:53,130 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:53,130 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:53,130 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:53,130 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:53,131 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:53,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741834_1010 (size=8107) 2024-12-03T05:02:53,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741834_1010 (size=8107) 2024-12-03T05:02:53,135 DEBUG [RS:0;32beccd76b35:36269 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/oldWALs 2024-12-03T05:02:53,136 INFO [RS:0;32beccd76b35:36269 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 32beccd76b35%2C36269%2C1733202063527.meta:.meta(num 1733202065194) 2024-12-03T05:02:53,136 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:53,136 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:53,136 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:53,136 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:53,136 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:53,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741878_1054 (size=778) 2024-12-03T05:02:53,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741878_1054 (size=778) 2024-12-03T05:02:53,141 DEBUG [RS:0;32beccd76b35:36269 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/oldWALs 2024-12-03T05:02:53,141 INFO [RS:0;32beccd76b35:36269 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 32beccd76b35%2C36269%2C1733202063527:(num 1733202172813) 2024-12-03T05:02:53,141 DEBUG [RS:0;32beccd76b35:36269 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T05:02:53,141 INFO [RS:0;32beccd76b35:36269 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T05:02:53,141 INFO [RS:0;32beccd76b35:36269 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T05:02:53,141 INFO [RS:0;32beccd76b35:36269 {}] hbase.ChoreService(370): Chore service for: regionserver/32beccd76b35:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-03T05:02:53,141 INFO [RS:0;32beccd76b35:36269 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T05:02:53,141 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
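Annotation (not part of the original log): once the region server's WALs are closed, the remaining WAL files are moved into the shared oldWALs directory ("Moved 1 WAL file(s) to .../oldWALs" above). A hedged sketch of inspecting that directory with the plain Hadoop FileSystem API; the path is copied from the log, and the snippet assumes the HDFS instance is still reachable, so nothing here is HBase-internal.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class OldWalsListingSketch {
      public static void main(String[] args) throws Exception {
        Path oldWals = new Path(
            "hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/oldWALs");
        // Resolves the filesystem for the path and lists the archived WAL files with their lengths.
        FileSystem fs = oldWals.getFileSystem(new Configuration());
        for (FileStatus status : fs.listStatus(oldWals)) {
          System.out.println(status.getPath().getName() + "  " + status.getLen() + " bytes");
        }
      }
    }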
2024-12-03T05:02:53,141 INFO [RS:0;32beccd76b35:36269 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:36269 2024-12-03T05:02:53,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36269-0x1015177a4ae0001, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/32beccd76b35,36269,1733202063527 2024-12-03T05:02:53,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T05:02:53,144 INFO [RS:0;32beccd76b35:36269 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T05:02:53,145 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [32beccd76b35,36269,1733202063527] 2024-12-03T05:02:53,147 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/32beccd76b35,36269,1733202063527 already deleted, retry=false 2024-12-03T05:02:53,147 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 32beccd76b35,36269,1733202063527 expired; onlineServers=0 2024-12-03T05:02:53,147 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '32beccd76b35,37267,1733202063393' ***** 2024-12-03T05:02:53,147 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T05:02:53,147 INFO [M:0;32beccd76b35:37267 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T05:02:53,147 INFO [M:0;32beccd76b35:37267 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T05:02:53,147 DEBUG [M:0;32beccd76b35:37267 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T05:02:53,147 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
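Annotation (not part of the original log): the server identities in these lines (the /hbase/rs znode path, the expiration message, the STOPPING banner) all use the host,port,startcode form, e.g. 32beccd76b35,36269,1733202063527. For reference, a tiny sketch that pulls those components apart with the public ServerName helper; only the literal value is taken from the log.

    import org.apache.hadoop.hbase.ServerName;

    public class ServerNameSketch {
      public static void main(String[] args) {
        // ServerName.valueOf parses the host,port,startcode form used throughout the log above.
        ServerName rs = ServerName.valueOf("32beccd76b35,36269,1733202063527");
        System.out.println(rs.getHostname() + ":" + rs.getPort());  // 32beccd76b35:36269
        System.out.println(rs);                                     // 32beccd76b35,36269,1733202063527
      }
    }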
2024-12-03T05:02:53,147 DEBUG [M:0;32beccd76b35:37267 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T05:02:53,147 DEBUG [master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.large.0-1733202064518 {}] cleaner.HFileCleaner(306): Exit Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.large.0-1733202064518,5,FailOnTimeoutGroup] 2024-12-03T05:02:53,147 DEBUG [master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.small.0-1733202064518 {}] cleaner.HFileCleaner(306): Exit Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.small.0-1733202064518,5,FailOnTimeoutGroup] 2024-12-03T05:02:53,147 INFO [M:0;32beccd76b35:37267 {}] hbase.ChoreService(370): Chore service for: master/32beccd76b35:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T05:02:53,147 INFO [M:0;32beccd76b35:37267 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T05:02:53,147 DEBUG [M:0;32beccd76b35:37267 {}] master.HMaster(1795): Stopping service threads 2024-12-03T05:02:53,148 INFO [M:0;32beccd76b35:37267 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T05:02:53,148 INFO [M:0;32beccd76b35:37267 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T05:02:53,148 INFO [M:0;32beccd76b35:37267 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T05:02:53,148 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T05:02:53,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T05:02:53,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:02:53,149 DEBUG [M:0;32beccd76b35:37267 {}] zookeeper.ZKUtil(347): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-03T05:02:53,149 WARN [M:0;32beccd76b35:37267 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-03T05:02:53,149 INFO [M:0;32beccd76b35:37267 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/.lastflushedseqids 2024-12-03T05:02:53,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741879_1055 (size=228) 2024-12-03T05:02:53,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741879_1055 (size=228) 2024-12-03T05:02:53,159 INFO [M:0;32beccd76b35:37267 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-03T05:02:53,159 INFO [M:0;32beccd76b35:37267 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T05:02:53,159 DEBUG [M:0;32beccd76b35:37267 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T05:02:53,159 INFO [M:0;32beccd76b35:37267 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T05:02:53,159 DEBUG [M:0;32beccd76b35:37267 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T05:02:53,159 DEBUG [M:0;32beccd76b35:37267 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T05:02:53,159 DEBUG [M:0;32beccd76b35:37267 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T05:02:53,160 INFO [M:0;32beccd76b35:37267 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.35 KB 2024-12-03T05:02:53,183 DEBUG [M:0;32beccd76b35:37267 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/50d6ea6b0b1e4e3193ae3ad8f7b2aca9 is 82, key is hbase:meta,,1/info:regioninfo/1733202065419/Put/seqid=0 2024-12-03T05:02:53,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741880_1056 (size=5672) 2024-12-03T05:02:53,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741880_1056 (size=5672) 2024-12-03T05:02:53,188 INFO [M:0;32beccd76b35:37267 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/50d6ea6b0b1e4e3193ae3ad8f7b2aca9 2024-12-03T05:02:53,213 DEBUG [M:0;32beccd76b35:37267 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/be9c0e798c0e4fef8d14c56086acbedd is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733202066096/Put/seqid=0 2024-12-03T05:02:53,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741881_1057 (size=7090) 2024-12-03T05:02:53,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741881_1057 (size=7090) 2024-12-03T05:02:53,218 INFO [M:0;32beccd76b35:37267 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/be9c0e798c0e4fef8d14c56086acbedd 2024-12-03T05:02:53,223 INFO [M:0;32beccd76b35:37267 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for be9c0e798c0e4fef8d14c56086acbedd 2024-12-03T05:02:53,244 DEBUG [M:0;32beccd76b35:37267 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0ed4a21bc0954be5b54ab4516220fd16 is 69, key is 32beccd76b35,36269,1733202063527/rs:state/1733202064603/Put/seqid=0 2024-12-03T05:02:53,245 INFO [RS:0;32beccd76b35:36269 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T05:02:53,245 INFO [RS:0;32beccd76b35:36269 {}] regionserver.HRegionServer(1031): Exiting; stopping=32beccd76b35,36269,1733202063527; zookeeper connection closed. 2024-12-03T05:02:53,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36269-0x1015177a4ae0001, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T05:02:53,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36269-0x1015177a4ae0001, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T05:02:53,245 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@73fad587 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@73fad587 2024-12-03T05:02:53,246 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-03T05:02:53,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741882_1058 (size=5156) 2024-12-03T05:02:53,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741882_1058 (size=5156) 2024-12-03T05:02:53,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:53,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:02:53,650 INFO [M:0;32beccd76b35:37267 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0ed4a21bc0954be5b54ab4516220fd16 2024-12-03T05:02:53,676 DEBUG [M:0;32beccd76b35:37267 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/94598eb433404822b29a2ef84667327a is 52, key is load_balancer_on/state:d/1733202065552/Put/seqid=0 2024-12-03T05:02:53,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741883_1059 (size=5056) 2024-12-03T05:02:53,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741883_1059 (size=5056) 2024-12-03T05:02:54,083 INFO [M:0;32beccd76b35:37267 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/94598eb433404822b29a2ef84667327a 2024-12-03T05:02:54,088 DEBUG [M:0;32beccd76b35:37267 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/50d6ea6b0b1e4e3193ae3ad8f7b2aca9 as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/50d6ea6b0b1e4e3193ae3ad8f7b2aca9 2024-12-03T05:02:54,093 INFO [M:0;32beccd76b35:37267 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/50d6ea6b0b1e4e3193ae3ad8f7b2aca9, entries=8, sequenceid=125, filesize=5.5 K 2024-12-03T05:02:54,094 DEBUG [M:0;32beccd76b35:37267 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/be9c0e798c0e4fef8d14c56086acbedd as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/be9c0e798c0e4fef8d14c56086acbedd 2024-12-03T05:02:54,099 INFO [M:0;32beccd76b35:37267 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for be9c0e798c0e4fef8d14c56086acbedd 2024-12-03T05:02:54,099 INFO [M:0;32beccd76b35:37267 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/be9c0e798c0e4fef8d14c56086acbedd, entries=13, sequenceid=125, filesize=6.9 K 2024-12-03T05:02:54,100 DEBUG [M:0;32beccd76b35:37267 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0ed4a21bc0954be5b54ab4516220fd16 as 
hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0ed4a21bc0954be5b54ab4516220fd16 2024-12-03T05:02:54,104 INFO [M:0;32beccd76b35:37267 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0ed4a21bc0954be5b54ab4516220fd16, entries=1, sequenceid=125, filesize=5.0 K 2024-12-03T05:02:54,105 DEBUG [M:0;32beccd76b35:37267 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/94598eb433404822b29a2ef84667327a as hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/94598eb433404822b29a2ef84667327a 2024-12-03T05:02:54,109 INFO [M:0;32beccd76b35:37267 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36989/user/jenkins/test-data/47b20840-546a-1edf-71fc-f731925442cf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/94598eb433404822b29a2ef84667327a, entries=1, sequenceid=125, filesize=4.9 K 2024-12-03T05:02:54,111 INFO [M:0;32beccd76b35:37267 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 952ms, sequenceid=125, compaction requested=false 2024-12-03T05:02:54,113 INFO [M:0;32beccd76b35:37267 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T05:02:54,113 DEBUG [M:0;32beccd76b35:37267 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733202173159Disabling compacts and flushes for region at 1733202173159Disabling writes for close at 1733202173159Obtaining lock to block concurrent updates at 1733202173160 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733202173160Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64808, getOffHeapSize=0, getCellsCount=148 at 1733202173160Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733202173161 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733202173161Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733202173182 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733202173182Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733202173193 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733202173212 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733202173212Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733202173224 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733202173243 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733202173243Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733202173655 (+412 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733202173675 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733202173675Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35a68e09: reopening flushed file at 1733202174087 (+412 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e1b0e21: reopening flushed file at 1733202174093 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@44bc9db5: reopening flushed file at 1733202174099 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@11a882d0: reopening flushed file at 1733202174104 (+5 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 952ms, sequenceid=125, compaction requested=false at 1733202174111 (+7 ms)Writing region close event to WAL at 1733202174112 (+1 ms)Closed at 1733202174113 (+1 ms) 2024-12-03T05:02:54,113 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:54,113 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:54,113 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:54,113 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:54,114 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:54,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36851 is added to blk_1073741830_1006 (size=61320) 2024-12-03T05:02:54,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36125 is added to blk_1073741830_1006 (size=61320) 2024-12-03T05:02:54,117 INFO [M:0;32beccd76b35:37267 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-03T05:02:54,117 INFO [M:0;32beccd76b35:37267 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:37267 2024-12-03T05:02:54,117 INFO [M:0;32beccd76b35:37267 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T05:02:54,117 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T05:02:54,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T05:02:54,219 INFO [M:0;32beccd76b35:37267 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T05:02:54,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37267-0x1015177a4ae0000, quorum=127.0.0.1:52145, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T05:02:54,226 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@432aed61{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T05:02:54,226 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5f29755b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T05:02:54,227 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T05:02:54,227 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d89d07b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T05:02:54,227 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56dad49a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/hadoop.log.dir/,STOPPED} 2024-12-03T05:02:54,230 WARN [BP-1385824722-172.17.0.3-1733202062277 heartbeating to localhost/127.0.0.1:36989 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T05:02:54,230 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T05:02:54,230 WARN [BP-1385824722-172.17.0.3-1733202062277 heartbeating to localhost/127.0.0.1:36989 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1385824722-172.17.0.3-1733202062277 (Datanode Uuid d1e3e6fe-73a3-47ed-8d3b-a6cd6abd4aa4) service to localhost/127.0.0.1:36989 2024-12-03T05:02:54,230 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T05:02:54,230 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/cluster_bd6a4643-fcaa-14d8-80d1-ab69445c54a4/data/data3/current/BP-1385824722-172.17.0.3-1733202062277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T05:02:54,230 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/cluster_bd6a4643-fcaa-14d8-80d1-ab69445c54a4/data/data4/current/BP-1385824722-172.17.0.3-1733202062277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T05:02:54,231 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T05:02:54,233 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@c45f3c2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T05:02:54,233 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5dd84d20{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T05:02:54,233 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T05:02:54,233 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@d27641d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T05:02:54,233 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3743fb08{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/hadoop.log.dir/,STOPPED} 2024-12-03T05:02:54,234 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T05:02:54,234 WARN [BP-1385824722-172.17.0.3-1733202062277 heartbeating to localhost/127.0.0.1:36989 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T05:02:54,234 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T05:02:54,234 WARN [BP-1385824722-172.17.0.3-1733202062277 heartbeating to localhost/127.0.0.1:36989 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1385824722-172.17.0.3-1733202062277 (Datanode Uuid 5c35fb61-e1e7-4949-a6a0-9bfbf53ce13f) service to localhost/127.0.0.1:36989 2024-12-03T05:02:54,235 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/cluster_bd6a4643-fcaa-14d8-80d1-ab69445c54a4/data/data1/current/BP-1385824722-172.17.0.3-1733202062277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T05:02:54,235 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/cluster_bd6a4643-fcaa-14d8-80d1-ab69445c54a4/data/data2/current/BP-1385824722-172.17.0.3-1733202062277 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T05:02:54,236 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T05:02:54,243 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6183d646{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T05:02:54,244 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@12849da8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T05:02:54,244 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T05:02:54,244 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2721c5f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T05:02:54,244 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8e75f7d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/hadoop.log.dir/,STOPPED} 2024-12-03T05:02:54,255 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-03T05:02:54,298 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-03T05:02:54,308 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=227 (was 205) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/32beccd76b35:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36989 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
LeaseRenewer:jenkins@localhost:36989 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1647223992) connection to localhost/127.0.0.1:36989 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36989 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:36989 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1647223992) connection to localhost/127.0.0.1:36989 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36989 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1647223992) connection to localhost/127.0.0.1:36989 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=506 (was 483) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=547 (was 821), ProcessCount=11 (was 11), AvailableMemoryMB=3965 (was 4156) 2024-12-03T05:02:54,318 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=227, OpenFileDescriptor=506, MaxFileDescriptor=1048576, SystemLoadAverage=547, ProcessCount=11, AvailableMemoryMB=3965 2024-12-03T05:02:54,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T05:02:54,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/hadoop.log.dir so I do NOT create it in target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03 2024-12-03T05:02:54,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0bd41af8-8ad5-3c1d-1ce3-19ee9be22461/hadoop.tmp.dir so I do NOT create it in target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03 2024-12-03T05:02:54,319 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/cluster_2173fbb5-6706-d303-ecd6-3f263f224d80, deleteOnExit=true 2024-12-03T05:02:54,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-03T05:02:54,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/test.cache.data in system properties and HBase conf 2024-12-03T05:02:54,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T05:02:54,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/hadoop.log.dir in system properties and HBase conf 2024-12-03T05:02:54,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T05:02:54,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T05:02:54,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-03T05:02:54,320 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-12-03T05:02:54,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T05:02:54,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T05:02:54,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T05:02:54,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T05:02:54,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T05:02:54,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T05:02:54,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T05:02:54,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T05:02:54,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T05:02:54,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/nfs.dump.dir in system properties and HBase conf 2024-12-03T05:02:54,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/java.io.tmpdir in system properties and HBase conf 2024-12-03T05:02:54,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T05:02:54,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T05:02:54,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T05:02:54,334 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T05:02:54,394 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T05:02:54,398 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T05:02:54,402 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T05:02:54,402 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T05:02:54,402 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T05:02:54,403 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T05:02:54,403 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@610df049{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/hadoop.log.dir/,AVAILABLE} 2024-12-03T05:02:54,404 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@455a32b8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T05:02:54,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:54,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:54,520 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3cb23f43{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/java.io.tmpdir/jetty-localhost-35633-hadoop-hdfs-3_4_1-tests_jar-_-any-11304135031778493354/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T05:02:54,520 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1d180ecd{HTTP/1.1, (http/1.1)}{localhost:35633} 2024-12-03T05:02:54,520 INFO [Time-limited test {}] server.Server(415): Started @358683ms 2024-12-03T05:02:54,533 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-03T05:02:54,656 INFO [regionserver/32beccd76b35:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T05:02:54,684 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T05:02:54,688 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T05:02:54,697 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T05:02:54,697 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T05:02:54,697 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T05:02:54,698 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ddf6979{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/hadoop.log.dir/,AVAILABLE} 2024-12-03T05:02:54,698 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1917abb0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T05:02:54,831 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7fb436c5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/java.io.tmpdir/jetty-localhost-41097-hadoop-hdfs-3_4_1-tests_jar-_-any-2879450581358855287/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T05:02:54,832 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@59a2b0cf{HTTP/1.1, (http/1.1)}{localhost:41097} 2024-12-03T05:02:54,832 INFO [Time-limited test {}] server.Server(415): Started @358994ms 2024-12-03T05:02:54,833 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T05:02:54,866 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T05:02:54,870 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T05:02:54,870 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T05:02:54,870 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T05:02:54,871 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-03T05:02:54,871 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6ea1bbf3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/hadoop.log.dir/,AVAILABLE} 2024-12-03T05:02:54,871 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@751df37f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T05:02:54,924 WARN [Thread-2472 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/cluster_2173fbb5-6706-d303-ecd6-3f263f224d80/data/data1/current/BP-1834779685-172.17.0.3-1733202174340/current, will proceed with Du for space computation calculation, 2024-12-03T05:02:54,924 WARN [Thread-2473 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/cluster_2173fbb5-6706-d303-ecd6-3f263f224d80/data/data2/current/BP-1834779685-172.17.0.3-1733202174340/current, will proceed with Du for space computation calculation, 2024-12-03T05:02:54,953 WARN [Thread-2451 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-03T05:02:54,958 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1c2d9ea1b2865bc6 with lease ID 0xf8fbfa5fdd7b7d7c: Processing first storage report for DS-f91e7760-5686-4bf7-a279-dc9cfbb721d9 from datanode DatanodeRegistration(127.0.0.1:44997, datanodeUuid=f3730e1e-719b-4219-ab73-759633a6cb5f, infoPort=36383, infoSecurePort=0, ipcPort=40659, storageInfo=lv=-57;cid=testClusterID;nsid=1720138092;c=1733202174340) 2024-12-03T05:02:54,958 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1c2d9ea1b2865bc6 with lease ID 0xf8fbfa5fdd7b7d7c: from storage DS-f91e7760-5686-4bf7-a279-dc9cfbb721d9 node DatanodeRegistration(127.0.0.1:44997, datanodeUuid=f3730e1e-719b-4219-ab73-759633a6cb5f, infoPort=36383, infoSecurePort=0, ipcPort=40659, storageInfo=lv=-57;cid=testClusterID;nsid=1720138092;c=1733202174340), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T05:02:54,958 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1c2d9ea1b2865bc6 with lease ID 0xf8fbfa5fdd7b7d7c: Processing first storage report for DS-35d19edf-a6f4-41fa-a638-135ca839caf0 from datanode DatanodeRegistration(127.0.0.1:44997, datanodeUuid=f3730e1e-719b-4219-ab73-759633a6cb5f, infoPort=36383, infoSecurePort=0, ipcPort=40659, storageInfo=lv=-57;cid=testClusterID;nsid=1720138092;c=1733202174340) 2024-12-03T05:02:54,958 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1c2d9ea1b2865bc6 with lease ID 0xf8fbfa5fdd7b7d7c: from storage DS-35d19edf-a6f4-41fa-a638-135ca839caf0 node DatanodeRegistration(127.0.0.1:44997, datanodeUuid=f3730e1e-719b-4219-ab73-759633a6cb5f, infoPort=36383, infoSecurePort=0, ipcPort=40659, storageInfo=lv=-57;cid=testClusterID;nsid=1720138092;c=1733202174340), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T05:02:55,007 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@26b5df9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/java.io.tmpdir/jetty-localhost-44859-hadoop-hdfs-3_4_1-tests_jar-_-any-13606423147674761068/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T05:02:55,008 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6d371f78{HTTP/1.1, (http/1.1)}{localhost:44859} 2024-12-03T05:02:55,008 INFO [Time-limited test {}] server.Server(415): Started @359170ms 2024-12-03T05:02:55,009 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-03T05:02:55,117 WARN [Thread-2499 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/cluster_2173fbb5-6706-d303-ecd6-3f263f224d80/data/data4/current/BP-1834779685-172.17.0.3-1733202174340/current, will proceed with Du for space computation calculation, 2024-12-03T05:02:55,117 WARN [Thread-2498 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/cluster_2173fbb5-6706-d303-ecd6-3f263f224d80/data/data3/current/BP-1834779685-172.17.0.3-1733202174340/current, will proceed with Du for space computation calculation, 2024-12-03T05:02:55,142 WARN [Thread-2487 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T05:02:55,146 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc91ef8610274e4c6 with lease ID 0xf8fbfa5fdd7b7d7d: Processing first storage report for DS-180710b4-0a38-4642-86b9-c2032fe5b100 from datanode DatanodeRegistration(127.0.0.1:33763, datanodeUuid=132b9318-0011-440b-b088-e1556f180ba4, infoPort=43977, infoSecurePort=0, ipcPort=40353, storageInfo=lv=-57;cid=testClusterID;nsid=1720138092;c=1733202174340) 2024-12-03T05:02:55,146 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc91ef8610274e4c6 with lease ID 0xf8fbfa5fdd7b7d7d: from storage DS-180710b4-0a38-4642-86b9-c2032fe5b100 node DatanodeRegistration(127.0.0.1:33763, datanodeUuid=132b9318-0011-440b-b088-e1556f180ba4, infoPort=43977, infoSecurePort=0, ipcPort=40353, storageInfo=lv=-57;cid=testClusterID;nsid=1720138092;c=1733202174340), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T05:02:55,147 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc91ef8610274e4c6 with lease ID 0xf8fbfa5fdd7b7d7d: Processing first storage report for DS-128efff3-8a12-47bd-82af-9f81364d25ce from datanode DatanodeRegistration(127.0.0.1:33763, datanodeUuid=132b9318-0011-440b-b088-e1556f180ba4, infoPort=43977, infoSecurePort=0, ipcPort=40353, storageInfo=lv=-57;cid=testClusterID;nsid=1720138092;c=1733202174340) 2024-12-03T05:02:55,147 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc91ef8610274e4c6 with lease ID 0xf8fbfa5fdd7b7d7d: from storage DS-128efff3-8a12-47bd-82af-9f81364d25ce node DatanodeRegistration(127.0.0.1:33763, datanodeUuid=132b9318-0011-440b-b088-e1556f180ba4, infoPort=43977, infoSecurePort=0, ipcPort=40353, storageInfo=lv=-57;cid=testClusterID;nsid=1720138092;c=1733202174340), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T05:02:55,158 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03 2024-12-03T05:02:55,162 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/cluster_2173fbb5-6706-d303-ecd6-3f263f224d80/zookeeper_0, clientPort=54029, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/cluster_2173fbb5-6706-d303-ecd6-3f263f224d80/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/cluster_2173fbb5-6706-d303-ecd6-3f263f224d80/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T05:02:55,165 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54029 2024-12-03T05:02:55,165 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T05:02:55,167 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T05:02:55,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44997 is added to blk_1073741825_1001 (size=7) 2024-12-03T05:02:55,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741825_1001 (size=7) 2024-12-03T05:02:55,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:55,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-03T05:02:55,580 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75 with version=8 2024-12-03T05:02:55,580 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44433/user/jenkins/test-data/9005074b-9dcf-b221-e577-aedebfd16ff4/hbase-staging 2024-12-03T05:02:55,583 INFO [Time-limited test {}] client.ConnectionUtils(128): master/32beccd76b35:0 server-side Connection retries=45 2024-12-03T05:02:55,583 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T05:02:55,583 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T05:02:55,583 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T05:02:55,583 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T05:02:55,583 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T05:02:55,583 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-03T05:02:55,583 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T05:02:55,584 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34985 2024-12-03T05:02:55,585 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34985 connecting to ZooKeeper ensemble=127.0.0.1:54029 2024-12-03T05:02:55,592 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:349850x0, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T05:02:55,593 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34985-0x101517959630000 connected 2024-12-03T05:02:55,611 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T05:02:55,612 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T05:02:55,614 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T05:02:55,614 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75, 
hbase.cluster.distributed=false 2024-12-03T05:02:55,616 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T05:02:55,616 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34985 2024-12-03T05:02:55,616 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34985 2024-12-03T05:02:55,617 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34985 2024-12-03T05:02:55,617 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34985 2024-12-03T05:02:55,617 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34985 2024-12-03T05:02:55,632 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/32beccd76b35:0 server-side Connection retries=45 2024-12-03T05:02:55,632 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T05:02:55,632 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T05:02:55,632 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T05:02:55,632 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T05:02:55,632 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T05:02:55,632 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T05:02:55,632 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T05:02:55,633 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42503 2024-12-03T05:02:55,633 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42503 connecting to ZooKeeper ensemble=127.0.0.1:54029 2024-12-03T05:02:55,634 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T05:02:55,636 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T05:02:55,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:425030x0, 
quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T05:02:55,639 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:425030x0, quorum=127.0.0.1:54029, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T05:02:55,639 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42503-0x101517959630001 connected 2024-12-03T05:02:55,640 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T05:02:55,640 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T05:02:55,641 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42503-0x101517959630001, quorum=127.0.0.1:54029, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T05:02:55,642 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42503-0x101517959630001, quorum=127.0.0.1:54029, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T05:02:55,643 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42503 2024-12-03T05:02:55,643 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42503 2024-12-03T05:02:55,643 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42503 2024-12-03T05:02:55,644 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42503 2024-12-03T05:02:55,644 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42503 2024-12-03T05:02:55,655 DEBUG [M:0;32beccd76b35:34985 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;32beccd76b35:34985 2024-12-03T05:02:55,655 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/32beccd76b35,34985,1733202175582 2024-12-03T05:02:55,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42503-0x101517959630001, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T05:02:55,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T05:02:55,657 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/32beccd76b35,34985,1733202175582 2024-12-03T05:02:55,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42503-0x101517959630001, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T05:02:55,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:02:55,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42503-0x101517959630001, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:02:55,659 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T05:02:55,659 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/32beccd76b35,34985,1733202175582 from backup master directory 2024-12-03T05:02:55,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42503-0x101517959630001, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T05:02:55,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/32beccd76b35,34985,1733202175582 2024-12-03T05:02:55,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T05:02:55,661 WARN [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-03T05:02:55,661 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=32beccd76b35,34985,1733202175582 2024-12-03T05:02:55,664 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/hbase.id] with ID: 79c87186-cdb6-48fa-83ee-7b9fe0ce8ddb 2024-12-03T05:02:55,664 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/.tmp/hbase.id 2024-12-03T05:02:55,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44997 is added to blk_1073741826_1002 (size=42) 2024-12-03T05:02:55,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741826_1002 (size=42) 2024-12-03T05:02:55,670 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/.tmp/hbase.id]:[hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/hbase.id] 2024-12-03T05:02:55,680 INFO [master/32beccd76b35:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T05:02:55,680 INFO [master/32beccd76b35:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-03T05:02:55,681 INFO [master/32beccd76b35:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-03T05:02:55,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42503-0x101517959630001, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:02:55,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:02:55,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44997 is added to blk_1073741827_1003 (size=196) 2024-12-03T05:02:55,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741827_1003 (size=196) 2024-12-03T05:02:55,691 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T05:02:55,691 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T05:02:55,692 INFO [master/32beccd76b35:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T05:02:55,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741828_1004 (size=1189) 2024-12-03T05:02:55,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44997 is added to blk_1073741828_1004 (size=1189) 2024-12-03T05:02:55,698 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store 2024-12-03T05:02:55,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44997 is added to blk_1073741829_1005 (size=34) 2024-12-03T05:02:55,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741829_1005 (size=34) 2024-12-03T05:02:55,707 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T05:02:55,707 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T05:02:55,708 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T05:02:55,708 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T05:02:55,708 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T05:02:55,708 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T05:02:55,708 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T05:02:55,708 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733202175707Disabling compacts and flushes for region at 1733202175707Disabling writes for close at 1733202175708 (+1 ms)Writing region close event to WAL at 1733202175708Closed at 1733202175708 2024-12-03T05:02:55,708 WARN [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store/.initializing 2024-12-03T05:02:55,709 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/WALs/32beccd76b35,34985,1733202175582 2024-12-03T05:02:55,711 INFO [master/32beccd76b35:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32beccd76b35%2C34985%2C1733202175582, suffix=, logDir=hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/WALs/32beccd76b35,34985,1733202175582, archiveDir=hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/oldWALs, maxLogs=10 2024-12-03T05:02:55,711 INFO [master/32beccd76b35:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C34985%2C1733202175582.1733202175711 2024-12-03T05:02:55,718 INFO [master/32beccd76b35:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/WALs/32beccd76b35,34985,1733202175582/32beccd76b35%2C34985%2C1733202175582.1733202175711 2024-12-03T05:02:55,719 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43977:43977),(127.0.0.1/127.0.0.1:36383:36383)] 2024-12-03T05:02:55,719 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T05:02:55,720 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T05:02:55,720 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:02:55,720 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:02:55,721 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:02:55,722 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T05:02:55,722 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:02:55,722 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:02:55,722 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:02:55,723 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T05:02:55,723 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:02:55,724 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T05:02:55,724 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:02:55,725 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T05:02:55,725 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:02:55,725 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T05:02:55,725 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:02:55,726 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T05:02:55,726 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:02:55,726 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T05:02:55,727 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:02:55,727 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:02:55,727 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:02:55,728 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:02:55,728 DEBUG [master/32beccd76b35:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:02:55,729 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T05:02:55,730 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T05:02:55,733 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T05:02:55,733 INFO [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=760866, jitterRate=-0.0325096994638443}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T05:02:55,734 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733202175720Initializing all the Stores at 1733202175720Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733202175721 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733202175721Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733202175721Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733202175721Cleaning up temporary data from old regions at 1733202175728 (+7 ms)Region opened successfully at 1733202175733 (+5 ms) 2024-12-03T05:02:55,737 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T05:02:55,740 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3df12ad8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=32beccd76b35/172.17.0.3:0 2024-12-03T05:02:55,740 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-03T05:02:55,741 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T05:02:55,741 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T05:02:55,741 INFO [master/32beccd76b35:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T05:02:55,741 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-03T05:02:55,741 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-03T05:02:55,741 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T05:02:55,745 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T05:02:55,746 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T05:02:55,747 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-03T05:02:55,747 INFO [master/32beccd76b35:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T05:02:55,748 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T05:02:55,749 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-03T05:02:55,749 INFO [master/32beccd76b35:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T05:02:55,750 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T05:02:55,751 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-03T05:02:55,751 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T05:02:55,752 DEBUG 
[master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T05:02:55,754 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T05:02:55,755 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T05:02:55,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T05:02:55,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42503-0x101517959630001, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T05:02:55,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:02:55,757 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42503-0x101517959630001, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:02:55,757 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=32beccd76b35,34985,1733202175582, sessionid=0x101517959630000, setting cluster-up flag (Was=false) 2024-12-03T05:02:55,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:02:55,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42503-0x101517959630001, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:02:55,764 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T05:02:55,765 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=32beccd76b35,34985,1733202175582 2024-12-03T05:02:55,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:02:55,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42503-0x101517959630001, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:02:55,771 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T05:02:55,772 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=32beccd76b35,34985,1733202175582 2024-12-03T05:02:55,773 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-03T05:02:55,774 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-03T05:02:55,775 INFO [master/32beccd76b35:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-03T05:02:55,775 INFO [master/32beccd76b35:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-03T05:02:55,775 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 32beccd76b35,34985,1733202175582 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T05:02:55,776 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T05:02:55,776 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T05:02:55,776 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T05:02:55,776 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/32beccd76b35:0, corePoolSize=5, maxPoolSize=5 2024-12-03T05:02:55,776 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/32beccd76b35:0, corePoolSize=10, maxPoolSize=10 2024-12-03T05:02:55,776 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:02:55,776 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/32beccd76b35:0, corePoolSize=2, maxPoolSize=2 2024-12-03T05:02:55,776 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/32beccd76b35:0, corePoolSize=1, 
maxPoolSize=1 2024-12-03T05:02:55,777 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733202205777 2024-12-03T05:02:55,777 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T05:02:55,777 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T05:02:55,777 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T05:02:55,777 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T05:02:55,777 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T05:02:55,777 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T05:02:55,777 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T05:02:55,777 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-03T05:02:55,778 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:02:55,778 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T05:02:55,781 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T05:02:55,782 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T05:02:55,782 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T05:02:55,782 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T05:02:55,782 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T05:02:55,782 INFO [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T05:02:55,782 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.large.0-1733202175782,5,FailOnTimeoutGroup] 2024-12-03T05:02:55,784 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.small.0-1733202175782,5,FailOnTimeoutGroup] 2024-12-03T05:02:55,784 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-03T05:02:55,784 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T05:02:55,784 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T05:02:55,784 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-03T05:02:55,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44997 is added to blk_1073741831_1007 (size=1321) 2024-12-03T05:02:55,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741831_1007 (size=1321) 2024-12-03T05:02:55,790 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-03T05:02:55,791 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75 2024-12-03T05:02:55,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44997 is added to blk_1073741832_1008 (size=32) 2024-12-03T05:02:55,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741832_1008 (size=32) 2024-12-03T05:02:55,799 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T05:02:55,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T05:02:55,801 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T05:02:55,801 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:02:55,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:02:55,801 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T05:02:55,802 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T05:02:55,802 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:02:55,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:02:55,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T05:02:55,804 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T05:02:55,804 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:02:55,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:02:55,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T05:02:55,805 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T05:02:55,805 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:02:55,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:02:55,805 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T05:02:55,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/data/hbase/meta/1588230740 2024-12-03T05:02:55,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/data/hbase/meta/1588230740 2024-12-03T05:02:55,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T05:02:55,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T05:02:55,808 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-03T05:02:55,809 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T05:02:55,811 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T05:02:55,811 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=724866, jitterRate=-0.07828526198863983}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T05:02:55,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733202175799Initializing all the Stores at 1733202175799Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733202175800 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733202175800Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733202175800Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733202175800Cleaning up temporary data from old regions at 1733202175807 (+7 ms)Region opened successfully at 1733202175812 (+5 ms) 2024-12-03T05:02:55,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T05:02:55,812 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T05:02:55,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T05:02:55,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T05:02:55,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T05:02:55,812 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T05:02:55,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733202175812Disabling compacts and flushes for region at 1733202175812Disabling writes for close at 1733202175812Writing region close 
event to WAL at 1733202175812Closed at 1733202175812 2024-12-03T05:02:55,814 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T05:02:55,814 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-03T05:02:55,814 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T05:02:55,815 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T05:02:55,816 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T05:02:55,845 INFO [RS:0;32beccd76b35:42503 {}] regionserver.HRegionServer(746): ClusterId : 79c87186-cdb6-48fa-83ee-7b9fe0ce8ddb 2024-12-03T05:02:55,845 DEBUG [RS:0;32beccd76b35:42503 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T05:02:55,847 DEBUG [RS:0;32beccd76b35:42503 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T05:02:55,847 DEBUG [RS:0;32beccd76b35:42503 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T05:02:55,849 DEBUG [RS:0;32beccd76b35:42503 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T05:02:55,850 DEBUG [RS:0;32beccd76b35:42503 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5318ad6c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=32beccd76b35/172.17.0.3:0 2024-12-03T05:02:55,862 DEBUG [RS:0;32beccd76b35:42503 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;32beccd76b35:42503 2024-12-03T05:02:55,862 INFO [RS:0;32beccd76b35:42503 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-03T05:02:55,862 INFO [RS:0;32beccd76b35:42503 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-03T05:02:55,862 DEBUG [RS:0;32beccd76b35:42503 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-03T05:02:55,862 INFO [RS:0;32beccd76b35:42503 {}] regionserver.HRegionServer(2659): reportForDuty to master=32beccd76b35,34985,1733202175582 with port=42503, startcode=1733202175631 2024-12-03T05:02:55,863 DEBUG [RS:0;32beccd76b35:42503 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T05:02:55,865 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59209, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T05:02:55,865 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34985 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 32beccd76b35,42503,1733202175631 2024-12-03T05:02:55,865 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34985 {}] master.ServerManager(517): Registering regionserver=32beccd76b35,42503,1733202175631 2024-12-03T05:02:55,866 DEBUG [RS:0;32beccd76b35:42503 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75 2024-12-03T05:02:55,866 DEBUG [RS:0;32beccd76b35:42503 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46313 2024-12-03T05:02:55,866 DEBUG [RS:0;32beccd76b35:42503 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-03T05:02:55,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T05:02:55,868 DEBUG [RS:0;32beccd76b35:42503 {}] zookeeper.ZKUtil(111): regionserver:42503-0x101517959630001, quorum=127.0.0.1:54029, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/32beccd76b35,42503,1733202175631 2024-12-03T05:02:55,868 WARN [RS:0;32beccd76b35:42503 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T05:02:55,868 INFO [RS:0;32beccd76b35:42503 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T05:02:55,869 DEBUG [RS:0;32beccd76b35:42503 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/WALs/32beccd76b35,42503,1733202175631 2024-12-03T05:02:55,869 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [32beccd76b35,42503,1733202175631] 2024-12-03T05:02:55,872 INFO [RS:0;32beccd76b35:42503 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T05:02:55,874 INFO [RS:0;32beccd76b35:42503 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T05:02:55,874 INFO [RS:0;32beccd76b35:42503 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T05:02:55,874 INFO [RS:0;32beccd76b35:42503 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-03T05:02:55,876 INFO [RS:0;32beccd76b35:42503 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-03T05:02:55,877 INFO [RS:0;32beccd76b35:42503 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-03T05:02:55,877 INFO [RS:0;32beccd76b35:42503 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-03T05:02:55,877 DEBUG [RS:0;32beccd76b35:42503 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:02:55,877 DEBUG [RS:0;32beccd76b35:42503 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:02:55,877 DEBUG [RS:0;32beccd76b35:42503 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:02:55,877 DEBUG [RS:0;32beccd76b35:42503 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:02:55,877 DEBUG [RS:0;32beccd76b35:42503 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:02:55,877 DEBUG [RS:0;32beccd76b35:42503 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/32beccd76b35:0, corePoolSize=2, maxPoolSize=2 2024-12-03T05:02:55,877 DEBUG [RS:0;32beccd76b35:42503 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:02:55,877 DEBUG [RS:0;32beccd76b35:42503 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:02:55,877 DEBUG [RS:0;32beccd76b35:42503 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:02:55,877 DEBUG [RS:0;32beccd76b35:42503 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:02:55,877 DEBUG [RS:0;32beccd76b35:42503 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:02:55,877 DEBUG [RS:0;32beccd76b35:42503 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/32beccd76b35:0, corePoolSize=1, maxPoolSize=1 2024-12-03T05:02:55,877 DEBUG [RS:0;32beccd76b35:42503 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/32beccd76b35:0, corePoolSize=3, maxPoolSize=3 2024-12-03T05:02:55,878 DEBUG [RS:0;32beccd76b35:42503 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/32beccd76b35:0, corePoolSize=3, maxPoolSize=3 2024-12-03T05:02:55,878 INFO [RS:0;32beccd76b35:42503 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-03T05:02:55,878 INFO [RS:0;32beccd76b35:42503 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T05:02:55,878 INFO [RS:0;32beccd76b35:42503 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T05:02:55,878 INFO [RS:0;32beccd76b35:42503 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T05:02:55,878 INFO [RS:0;32beccd76b35:42503 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T05:02:55,878 INFO [RS:0;32beccd76b35:42503 {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,42503,1733202175631-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T05:02:55,893 INFO [RS:0;32beccd76b35:42503 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T05:02:55,893 INFO [RS:0;32beccd76b35:42503 {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,42503,1733202175631-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T05:02:55,893 INFO [RS:0;32beccd76b35:42503 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T05:02:55,893 INFO [RS:0;32beccd76b35:42503 {}] regionserver.Replication(171): 32beccd76b35,42503,1733202175631 started 2024-12-03T05:02:55,906 INFO [RS:0;32beccd76b35:42503 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T05:02:55,906 INFO [RS:0;32beccd76b35:42503 {}] regionserver.HRegionServer(1482): Serving as 32beccd76b35,42503,1733202175631, RpcServer on 32beccd76b35/172.17.0.3:42503, sessionid=0x101517959630001 2024-12-03T05:02:55,906 DEBUG [RS:0;32beccd76b35:42503 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T05:02:55,906 DEBUG [RS:0;32beccd76b35:42503 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 32beccd76b35,42503,1733202175631 2024-12-03T05:02:55,906 DEBUG [RS:0;32beccd76b35:42503 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '32beccd76b35,42503,1733202175631' 2024-12-03T05:02:55,906 DEBUG [RS:0;32beccd76b35:42503 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T05:02:55,907 DEBUG [RS:0;32beccd76b35:42503 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T05:02:55,907 DEBUG [RS:0;32beccd76b35:42503 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T05:02:55,907 DEBUG [RS:0;32beccd76b35:42503 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T05:02:55,907 DEBUG [RS:0;32beccd76b35:42503 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 32beccd76b35,42503,1733202175631 2024-12-03T05:02:55,907 DEBUG [RS:0;32beccd76b35:42503 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '32beccd76b35,42503,1733202175631' 2024-12-03T05:02:55,907 DEBUG [RS:0;32beccd76b35:42503 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T05:02:55,907 DEBUG 
[RS:0;32beccd76b35:42503 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T05:02:55,908 DEBUG [RS:0;32beccd76b35:42503 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T05:02:55,908 INFO [RS:0;32beccd76b35:42503 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T05:02:55,908 INFO [RS:0;32beccd76b35:42503 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T05:02:55,966 WARN [32beccd76b35:34985 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-03T05:02:56,010 INFO [RS:0;32beccd76b35:42503 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32beccd76b35%2C42503%2C1733202175631, suffix=, logDir=hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/WALs/32beccd76b35,42503,1733202175631, archiveDir=hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/oldWALs, maxLogs=32 2024-12-03T05:02:56,010 INFO [RS:0;32beccd76b35:42503 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C42503%2C1733202175631.1733202176010 2024-12-03T05:02:56,015 INFO [RS:0;32beccd76b35:42503 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/WALs/32beccd76b35,42503,1733202175631/32beccd76b35%2C42503%2C1733202175631.1733202176010 2024-12-03T05:02:56,016 DEBUG [RS:0;32beccd76b35:42503 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36383:36383),(127.0.0.1/127.0.0.1:43977:43977)] 2024-12-03T05:02:56,216 DEBUG [32beccd76b35:34985 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-03T05:02:56,217 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=32beccd76b35,42503,1733202175631 2024-12-03T05:02:56,218 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 32beccd76b35,42503,1733202175631, state=OPENING 2024-12-03T05:02:56,219 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T05:02:56,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:02:56,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42503-0x101517959630001, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:02:56,221 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T05:02:56,221 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T05:02:56,221 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T05:02:56,221 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=32beccd76b35,42503,1733202175631}] 2024-12-03T05:02:56,374 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-03T05:02:56,376 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43877, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T05:02:56,379 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-03T05:02:56,379 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T05:02:56,380 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32beccd76b35%2C42503%2C1733202175631.meta, suffix=.meta, logDir=hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/WALs/32beccd76b35,42503,1733202175631, archiveDir=hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/oldWALs, maxLogs=32 2024-12-03T05:02:56,381 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 32beccd76b35%2C42503%2C1733202175631.meta.1733202176381.meta 2024-12-03T05:02:56,389 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/WALs/32beccd76b35,42503,1733202175631/32beccd76b35%2C42503%2C1733202175631.meta.1733202176381.meta 2024-12-03T05:02:56,392 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43977:43977),(127.0.0.1/127.0.0.1:36383:36383)] 2024-12-03T05:02:56,393 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T05:02:56,393 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T05:02:56,393 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T05:02:56,393 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-03T05:02:56,393 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T05:02:56,393 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T05:02:56,393 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-03T05:02:56,393 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-03T05:02:56,395 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T05:02:56,396 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T05:02:56,396 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:02:56,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:02:56,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-03T05:02:56,397 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-03T05:02:56,397 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:02:56,397 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:02:56,397 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T05:02:56,398 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T05:02:56,398 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:02:56,398 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T05:02:56,398 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T05:02:56,399 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T05:02:56,399 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T05:02:56,399 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-03T05:02:56,399 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-03T05:02:56,400 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/data/hbase/meta/1588230740 2024-12-03T05:02:56,401 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/data/hbase/meta/1588230740 2024-12-03T05:02:56,402 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-03T05:02:56,402 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-03T05:02:56,402 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T05:02:56,404 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-03T05:02:56,405 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=722971, jitterRate=-0.08069559931755066}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T05:02:56,405 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-03T05:02:56,405 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733202176393Writing region info on filesystem at 1733202176393Initializing all the Stores at 1733202176395 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733202176395Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733202176395Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733202176395Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733202176395Cleaning up temporary data from old regions at 1733202176402 (+7 ms)Running coprocessor post-open hooks at 1733202176405 (+3 ms)Region opened successfully at 1733202176405 2024-12-03T05:02:56,406 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733202176374 2024-12-03T05:02:56,408 DEBUG [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T05:02:56,408 INFO [RS_OPEN_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-03T05:02:56,409 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=32beccd76b35,42503,1733202175631 2024-12-03T05:02:56,410 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 32beccd76b35,42503,1733202175631, state=OPEN 2024-12-03T05:02:56,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42503-0x101517959630001, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T05:02:56,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T05:02:56,416 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=32beccd76b35,42503,1733202175631 2024-12-03T05:02:56,416 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T05:02:56,416 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T05:02:56,419 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T05:02:56,419 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=32beccd76b35,42503,1733202175631 in 195 msec 2024-12-03T05:02:56,421 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T05:02:56,421 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 605 msec 2024-12-03T05:02:56,422 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-03T05:02:56,422 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-03T05:02:56,423 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T05:02:56,423 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=32beccd76b35,42503,1733202175631, seqNum=-1] 2024-12-03T05:02:56,424 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T05:02:56,425 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35687, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T05:02:56,430 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 655 msec 2024-12-03T05:02:56,430 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733202176430, completionTime=-1 2024-12-03T05:02:56,430 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-03T05:02:56,430 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-03T05:02:56,432 INFO [master/32beccd76b35:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-03T05:02:56,432 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733202236432 2024-12-03T05:02:56,432 INFO [master/32beccd76b35:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733202296432 2024-12-03T05:02:56,432 INFO [master/32beccd76b35:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-03T05:02:56,433 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,34985,1733202175582-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T05:02:56,433 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,34985,1733202175582-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T05:02:56,433 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,34985,1733202175582-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T05:02:56,433 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-32beccd76b35:34985, period=300000, unit=MILLISECONDS is enabled. 
2024-12-03T05:02:56,433 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T05:02:56,433 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-03T05:02:56,436 DEBUG [master/32beccd76b35:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-03T05:02:56,437 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.776sec 2024-12-03T05:02:56,437 INFO [master/32beccd76b35:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T05:02:56,437 INFO [master/32beccd76b35:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T05:02:56,437 INFO [master/32beccd76b35:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T05:02:56,437 INFO [master/32beccd76b35:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-03T05:02:56,437 INFO [master/32beccd76b35:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T05:02:56,438 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,34985,1733202175582-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T05:02:56,438 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,34985,1733202175582-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T05:02:56,440 DEBUG [master/32beccd76b35:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-03T05:02:56,440 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T05:02:56,440 INFO [master/32beccd76b35:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32beccd76b35,34985,1733202175582-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T05:02:56,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,44203,1733201924824/32beccd76b35%2C44203%2C1733201924824.meta.1733201925707.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:56,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38443/user/jenkins/test-data/9358cb7e-74ca-2aa7-6389-175e2d7d3fe0/WALs/32beccd76b35,38103,1733201925919/32beccd76b35%2C38103%2C1733201925919.1733201926132 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-03T05:02:56,446 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76839020, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T05:02:56,446 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 32beccd76b35,34985,-1 for getting cluster id 2024-12-03T05:02:56,446 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-03T05:02:56,447 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '79c87186-cdb6-48fa-83ee-7b9fe0ce8ddb' 2024-12-03T05:02:56,448 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-03T05:02:56,448 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "79c87186-cdb6-48fa-83ee-7b9fe0ce8ddb" 2024-12-03T05:02:56,448 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63bb83a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T05:02:56,448 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [32beccd76b35,34985,-1] 2024-12-03T05:02:56,448 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-03T05:02:56,449 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T05:02:56,450 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48704, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-03T05:02:56,450 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4667722b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T05:02:56,451 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-03T05:02:56,451 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=32beccd76b35,42503,1733202175631, seqNum=-1] 2024-12-03T05:02:56,452 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T05:02:56,453 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37490, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T05:02:56,454 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=32beccd76b35,34985,1733202175582 2024-12-03T05:02:56,454 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T05:02:56,456 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-03T05:02:56,456 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-03T05:02:56,458 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/WALs/test.com,8080,1, archiveDir=hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/oldWALs, maxLogs=32 2024-12-03T05:02:56,459 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733202176459 2024-12-03T05:02:56,466 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/WALs/test.com,8080,1/test.com%2C8080%2C1.1733202176459 2024-12-03T05:02:56,468 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43977:43977),(127.0.0.1/127.0.0.1:36383:36383)] 2024-12-03T05:02:56,469 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733202176469 2024-12-03T05:02:56,473 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,473 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,473 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,473 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,473 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,473 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/WALs/test.com,8080,1/test.com%2C8080%2C1.1733202176459 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/WALs/test.com,8080,1/test.com%2C8080%2C1.1733202176469 2024-12-03T05:02:56,474 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36383:36383),(127.0.0.1/127.0.0.1:43977:43977)] 2024-12-03T05:02:56,474 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/WALs/test.com,8080,1/test.com%2C8080%2C1.1733202176459 is not closed yet, will try archiving it next time 2024-12-03T05:02:56,474 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,474 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,475 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44997 is added to blk_1073741835_1011 
(size=93) 2024-12-03T05:02:56,475 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,475 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741835_1011 (size=93) 2024-12-03T05:02:56,476 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/WALs/test.com,8080,1/test.com%2C8080%2C1.1733202176459 to hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/oldWALs/test.com%2C8080%2C1.1733202176459 2024-12-03T05:02:56,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741836_1012 (size=93) 2024-12-03T05:02:56,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44997 is added to blk_1073741836_1012 (size=93) 2024-12-03T05:02:56,479 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/oldWALs 2024-12-03T05:02:56,479 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733202176469) 2024-12-03T05:02:56,479 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-03T05:02:56,479 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-03T05:02:56,479 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at 
org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T05:02:56,479 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T05:02:56,479 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T05:02:56,479 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-03T05:02:56,480 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T05:02:56,480 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1744082799, stopped=false 2024-12-03T05:02:56,480 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=32beccd76b35,34985,1733202175582 2024-12-03T05:02:56,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T05:02:56,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42503-0x101517959630001, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T05:02:56,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42503-0x101517959630001, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:02:56,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:02:56,482 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T05:02:56,482 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
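
The call stack logged above shows where this shutdown originates: JUnit's RunAfters invokes AbstractTestLogRolling.tearDown(), which calls HBaseTestingUtil.shutdownMiniCluster(), which closes the shared AsyncConnection before stopping the cluster. A minimal sketch of that teardown shape follows; it is an illustration only, not the actual test code, and the field name TEST_UTIL and the wrapper class are assumptions.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class LogRollingTearDownSketch {
      // Hypothetical utility instance; the real test creates and starts it during setup.
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Closes the shared async connection, then stops the mini HBase cluster
        // (master and region server), the ZooKeeper quorum, and the mini DFS cluster,
        // producing the "Shutting down minicluster" sequence seen in this log.
        TEST_UTIL.shutdownMiniCluster();
      }
    }
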
2024-12-03T05:02:56,482 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T05:02:56,482 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T05:02:56,482 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '32beccd76b35,42503,1733202175631' ***** 2024-12-03T05:02:56,482 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-03T05:02:56,482 INFO [RS:0;32beccd76b35:42503 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T05:02:56,482 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42503-0x101517959630001, quorum=127.0.0.1:54029, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T05:02:56,482 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T05:02:56,482 INFO [RS:0;32beccd76b35:42503 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T05:02:56,482 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-03T05:02:56,482 INFO [RS:0;32beccd76b35:42503 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-03T05:02:56,482 INFO [RS:0;32beccd76b35:42503 {}] regionserver.HRegionServer(959): stopping server 32beccd76b35,42503,1733202175631 2024-12-03T05:02:56,482 INFO [RS:0;32beccd76b35:42503 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T05:02:56,483 INFO [RS:0;32beccd76b35:42503 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;32beccd76b35:42503. 2024-12-03T05:02:56,483 DEBUG [RS:0;32beccd76b35:42503 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T05:02:56,483 DEBUG [RS:0;32beccd76b35:42503 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T05:02:56,483 INFO [RS:0;32beccd76b35:42503 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T05:02:56,483 INFO [RS:0;32beccd76b35:42503 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T05:02:56,483 INFO [RS:0;32beccd76b35:42503 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-03T05:02:56,483 INFO [RS:0;32beccd76b35:42503 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-03T05:02:56,483 INFO [RS:0;32beccd76b35:42503 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-03T05:02:56,483 DEBUG [RS:0;32beccd76b35:42503 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-03T05:02:56,483 DEBUG [RS:0;32beccd76b35:42503 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-03T05:02:56,483 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-03T05:02:56,483 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-03T05:02:56,483 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-03T05:02:56,483 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T05:02:56,483 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T05:02:56,484 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-03T05:02:56,499 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/data/hbase/meta/1588230740/.tmp/ns/1339c531337745fca54e9cc6d21e2f7a is 43, key is default/ns:d/1733202176425/Put/seqid=0 2024-12-03T05:02:56,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741837_1013 (size=5153) 2024-12-03T05:02:56,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44997 is added to blk_1073741837_1013 (size=5153) 2024-12-03T05:02:56,503 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/data/hbase/meta/1588230740/.tmp/ns/1339c531337745fca54e9cc6d21e2f7a 2024-12-03T05:02:56,507 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/data/hbase/meta/1588230740/.tmp/ns/1339c531337745fca54e9cc6d21e2f7a as hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/data/hbase/meta/1588230740/ns/1339c531337745fca54e9cc6d21e2f7a 2024-12-03T05:02:56,512 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/data/hbase/meta/1588230740/ns/1339c531337745fca54e9cc6d21e2f7a, entries=2, sequenceid=6, filesize=5.0 K 2024-12-03T05:02:56,512 INFO 
[RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 29ms, sequenceid=6, compaction requested=false 2024-12-03T05:02:56,512 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-03T05:02:56,516 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T05:02:56,516 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T05:02:56,516 INFO [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-03T05:02:56,517 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733202176483Running coprocessor pre-close hooks at 1733202176483Disabling compacts and flushes for region at 1733202176483Disabling writes for close at 1733202176483Obtaining lock to block concurrent updates at 1733202176484 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733202176484Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733202176484Flushing stores of hbase:meta,,1.1588230740 at 1733202176484Flushing 1588230740/ns: creating writer at 1733202176485 (+1 ms)Flushing 1588230740/ns: appending metadata at 1733202176498 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1733202176498Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20534d27: reopening flushed file at 1733202176507 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 29ms, sequenceid=6, compaction requested=false at 1733202176512 (+5 ms)Writing region close event to WAL at 1733202176513 (+1 ms)Running coprocessor post-close hooks at 1733202176516 (+3 ms)Closed at 1733202176516 2024-12-03T05:02:56,517 DEBUG [RS_CLOSE_META-regionserver/32beccd76b35:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-03T05:02:56,683 INFO [RS:0;32beccd76b35:42503 {}] regionserver.HRegionServer(976): stopping server 32beccd76b35,42503,1733202175631; all regions closed. 
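
The flush of hbase:meta logged above follows a write-then-commit pattern: DefaultStoreFlusher writes the new HFile under the region's .tmp directory, and HRegionFileSystem then "commits" it by moving it into the ns column-family directory. A rough sketch of that commit step, under the assumption that it amounts to an HDFS rename (the real HRegionFileSystem.commitStoreFile adds validation and error handling); the paths are copied from the log and the wrapper class is illustrative.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CommitFlushedFileSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Region directory of hbase:meta as it appears in the log above.
        Path regionDir = new Path("hdfs://localhost:46313/user/jenkins/test-data/"
            + "dccf52da-9a1f-aaa4-e7ed-33c373146f75/data/hbase/meta/1588230740");
        FileSystem fs = regionDir.getFileSystem(conf);
        Path tmpFile = new Path(regionDir, ".tmp/ns/1339c531337745fca54e9cc6d21e2f7a");
        Path storeFile = new Path(regionDir, "ns/1339c531337745fca54e9cc6d21e2f7a");
        // The "Committing .tmp/ns/... as ns/..." step: move the flushed HFile into place.
        if (!fs.rename(tmpFile, storeFile)) {
          throw new IOException("Failed to commit flushed file " + tmpFile);
        }
      }
    }
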
2024-12-03T05:02:56,684 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,684 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,684 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,684 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,684 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44997 is added to blk_1073741834_1010 (size=1152) 2024-12-03T05:02:56,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741834_1010 (size=1152) 2024-12-03T05:02:56,689 DEBUG [RS:0;32beccd76b35:42503 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/oldWALs 2024-12-03T05:02:56,689 INFO [RS:0;32beccd76b35:42503 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 32beccd76b35%2C42503%2C1733202175631.meta:.meta(num 1733202176381) 2024-12-03T05:02:56,689 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,689 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,689 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,689 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,690 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741833_1009 (size=93) 2024-12-03T05:02:56,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44997 is added to blk_1073741833_1009 (size=93) 2024-12-03T05:02:56,693 DEBUG [RS:0;32beccd76b35:42503 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/oldWALs 2024-12-03T05:02:56,693 INFO [RS:0;32beccd76b35:42503 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 32beccd76b35%2C42503%2C1733202175631:(num 1733202176010) 2024-12-03T05:02:56,693 DEBUG [RS:0;32beccd76b35:42503 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T05:02:56,693 INFO [RS:0;32beccd76b35:42503 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T05:02:56,693 INFO [RS:0;32beccd76b35:42503 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T05:02:56,693 INFO [RS:0;32beccd76b35:42503 {}] hbase.ChoreService(370): Chore service for: regionserver/32beccd76b35:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-03T05:02:56,694 INFO [RS:0;32beccd76b35:42503 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T05:02:56,694 INFO [regionserver/32beccd76b35:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T05:02:56,694 INFO [RS:0;32beccd76b35:42503 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42503 2024-12-03T05:02:56,696 INFO [RS:0;32beccd76b35:42503 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-03T05:02:56,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T05:02:56,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42503-0x101517959630001, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/32beccd76b35,42503,1733202175631 2024-12-03T05:02:56,697 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [32beccd76b35,42503,1733202175631] 2024-12-03T05:02:56,698 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/32beccd76b35,42503,1733202175631 already deleted, retry=false 2024-12-03T05:02:56,698 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 32beccd76b35,42503,1733202175631 expired; onlineServers=0 2024-12-03T05:02:56,698 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '32beccd76b35,34985,1733202175582' ***** 2024-12-03T05:02:56,698 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T05:02:56,698 INFO [M:0;32beccd76b35:34985 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-03T05:02:56,698 INFO [M:0;32beccd76b35:34985 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-03T05:02:56,698 DEBUG [M:0;32beccd76b35:34985 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T05:02:56,698 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-03T05:02:56,698 DEBUG [M:0;32beccd76b35:34985 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T05:02:56,698 DEBUG [master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.large.0-1733202175782 {}] cleaner.HFileCleaner(306): Exit Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.large.0-1733202175782,5,FailOnTimeoutGroup] 2024-12-03T05:02:56,698 DEBUG [master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.small.0-1733202175782 {}] cleaner.HFileCleaner(306): Exit Thread[master/32beccd76b35:0:becomeActiveMaster-HFileCleaner.small.0-1733202175782,5,FailOnTimeoutGroup] 2024-12-03T05:02:56,699 INFO [M:0;32beccd76b35:34985 {}] hbase.ChoreService(370): Chore service for: master/32beccd76b35:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-03T05:02:56,699 INFO [M:0;32beccd76b35:34985 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-03T05:02:56,699 DEBUG [M:0;32beccd76b35:34985 {}] master.HMaster(1795): Stopping service threads 2024-12-03T05:02:56,699 INFO [M:0;32beccd76b35:34985 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T05:02:56,699 INFO [M:0;32beccd76b35:34985 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-03T05:02:56,699 INFO [M:0;32beccd76b35:34985 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T05:02:56,699 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-03T05:02:56,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T05:02:56,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T05:02:56,700 DEBUG [M:0;32beccd76b35:34985 {}] zookeeper.ZKUtil(347): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-03T05:02:56,700 WARN [M:0;32beccd76b35:34985 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-03T05:02:56,700 INFO [M:0;32beccd76b35:34985 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/.lastflushedseqids 2024-12-03T05:02:56,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44997 is added to blk_1073741838_1014 (size=108) 2024-12-03T05:02:56,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741838_1014 (size=108) 2024-12-03T05:02:56,706 INFO [M:0;32beccd76b35:34985 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-03T05:02:56,706 INFO [M:0;32beccd76b35:34985 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T05:02:56,706 DEBUG [M:0;32beccd76b35:34985 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T05:02:56,706 INFO [M:0;32beccd76b35:34985 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T05:02:56,706 DEBUG [M:0;32beccd76b35:34985 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T05:02:56,706 DEBUG [M:0;32beccd76b35:34985 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T05:02:56,706 DEBUG [M:0;32beccd76b35:34985 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T05:02:56,706 INFO [M:0;32beccd76b35:34985 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-03T05:02:56,723 DEBUG [M:0;32beccd76b35:34985 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b0103d4e6ea649aab83f8694fd210ac3 is 82, key is hbase:meta,,1/info:regioninfo/1733202176409/Put/seqid=0 2024-12-03T05:02:56,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741839_1015 (size=5672) 2024-12-03T05:02:56,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44997 is added to blk_1073741839_1015 (size=5672) 2024-12-03T05:02:56,728 INFO [M:0;32beccd76b35:34985 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b0103d4e6ea649aab83f8694fd210ac3 2024-12-03T05:02:56,747 DEBUG [M:0;32beccd76b35:34985 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a4b618deaf7a4a9e8a65b4002a655ca1 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733202176429/Put/seqid=0 2024-12-03T05:02:56,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44997 is added to blk_1073741840_1016 (size=5275) 2024-12-03T05:02:56,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741840_1016 (size=5275) 2024-12-03T05:02:56,752 INFO [M:0;32beccd76b35:34985 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a4b618deaf7a4a9e8a65b4002a655ca1 2024-12-03T05:02:56,773 DEBUG [M:0;32beccd76b35:34985 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7932413010ec40bfa20e632f600591af is 69, key is 32beccd76b35,42503,1733202175631/rs:state/1733202175865/Put/seqid=0 2024-12-03T05:02:56,782 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741841_1017 (size=5156) 2024-12-03T05:02:56,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44997 is added to blk_1073741841_1017 (size=5156) 2024-12-03T05:02:56,783 INFO [M:0;32beccd76b35:34985 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7932413010ec40bfa20e632f600591af 2024-12-03T05:02:56,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42503-0x101517959630001, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T05:02:56,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42503-0x101517959630001, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T05:02:56,798 INFO [RS:0;32beccd76b35:42503 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-03T05:02:56,798 INFO [RS:0;32beccd76b35:42503 {}] regionserver.HRegionServer(1031): Exiting; stopping=32beccd76b35,42503,1733202175631; zookeeper connection closed. 2024-12-03T05:02:56,799 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@37ef3bf1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@37ef3bf1 2024-12-03T05:02:56,799 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-03T05:02:56,809 DEBUG [M:0;32beccd76b35:34985 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4343dbb7dbb54530bf399ea56572773e is 52, key is load_balancer_on/state:d/1733202176455/Put/seqid=0 2024-12-03T05:02:56,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44997 is added to blk_1073741842_1018 (size=5056) 2024-12-03T05:02:56,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741842_1018 (size=5056) 2024-12-03T05:02:56,815 INFO [M:0;32beccd76b35:34985 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4343dbb7dbb54530bf399ea56572773e 2024-12-03T05:02:56,821 DEBUG [M:0;32beccd76b35:34985 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b0103d4e6ea649aab83f8694fd210ac3 as hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b0103d4e6ea649aab83f8694fd210ac3 2024-12-03T05:02:56,825 INFO [M:0;32beccd76b35:34985 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b0103d4e6ea649aab83f8694fd210ac3, entries=8, sequenceid=29, filesize=5.5 K 2024-12-03T05:02:56,826 DEBUG [M:0;32beccd76b35:34985 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a4b618deaf7a4a9e8a65b4002a655ca1 as hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a4b618deaf7a4a9e8a65b4002a655ca1 2024-12-03T05:02:56,830 INFO [M:0;32beccd76b35:34985 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a4b618deaf7a4a9e8a65b4002a655ca1, entries=3, sequenceid=29, filesize=5.2 K 2024-12-03T05:02:56,831 DEBUG [M:0;32beccd76b35:34985 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7932413010ec40bfa20e632f600591af as hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7932413010ec40bfa20e632f600591af 2024-12-03T05:02:56,835 INFO [M:0;32beccd76b35:34985 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7932413010ec40bfa20e632f600591af, entries=1, sequenceid=29, filesize=5.0 K 2024-12-03T05:02:56,836 DEBUG [M:0;32beccd76b35:34985 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4343dbb7dbb54530bf399ea56572773e as hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4343dbb7dbb54530bf399ea56572773e 2024-12-03T05:02:56,840 INFO [M:0;32beccd76b35:34985 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46313/user/jenkins/test-data/dccf52da-9a1f-aaa4-e7ed-33c373146f75/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4343dbb7dbb54530bf399ea56572773e, entries=1, sequenceid=29, filesize=4.9 K 2024-12-03T05:02:56,841 INFO [M:0;32beccd76b35:34985 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=29, compaction requested=false 2024-12-03T05:02:56,843 INFO [M:0;32beccd76b35:34985 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-03T05:02:56,843 DEBUG [M:0;32beccd76b35:34985 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733202176706Disabling compacts and flushes for region at 1733202176706Disabling writes for close at 1733202176706Obtaining lock to block concurrent updates at 1733202176706Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733202176706Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733202176706Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733202176707 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733202176707Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733202176723 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733202176723Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733202176732 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733202176747 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733202176747Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733202176757 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733202176772 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733202176772Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733202176788 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733202176809 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733202176809Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@39c722a6: reopening flushed file at 1733202176820 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@565eb0d7: reopening flushed file at 1733202176825 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ebbbd15: reopening flushed file at 1733202176831 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35e32f0a: reopening flushed file at 1733202176836 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=29, compaction requested=false at 1733202176841 (+5 ms)Writing region close event to WAL at 1733202176843 (+2 ms)Closed at 1733202176843 2024-12-03T05:02:56,843 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,843 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,844 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,844 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,844 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-03T05:02:56,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44997 is added to blk_1073741830_1006 (size=10311) 2024-12-03T05:02:56,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741830_1006 (size=10311) 2024-12-03T05:02:57,247 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-03T05:02:57,247 INFO [M:0;32beccd76b35:34985 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-03T05:02:57,247 INFO [M:0;32beccd76b35:34985 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34985
2024-12-03T05:02:57,248 INFO [M:0;32beccd76b35:34985 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-03T05:02:57,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T05:02:57,351 INFO [M:0;32beccd76b35:34985 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-03T05:02:57,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34985-0x101517959630000, quorum=127.0.0.1:54029, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-03T05:02:57,366 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@26b5df9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T05:02:57,367 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6d371f78{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T05:02:57,367 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T05:02:57,367 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@751df37f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T05:02:57,367 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6ea1bbf3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/hadoop.log.dir/,STOPPED}
2024-12-03T05:02:57,374 WARN [BP-1834779685-172.17.0.3-1733202174340 heartbeating to localhost/127.0.0.1:46313 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-03T05:02:57,374 WARN [BP-1834779685-172.17.0.3-1733202174340 heartbeating to localhost/127.0.0.1:46313 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1834779685-172.17.0.3-1733202174340 (Datanode Uuid 132b9318-0011-440b-b088-e1556f180ba4) service to localhost/127.0.0.1:46313
2024-12-03T05:02:57,374 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/cluster_2173fbb5-6706-d303-ecd6-3f263f224d80/data/data3/current/BP-1834779685-172.17.0.3-1733202174340 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T05:02:57,375 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-03T05:02:57,375 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-03T05:02:57,375 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-03T05:02:57,377 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/cluster_2173fbb5-6706-d303-ecd6-3f263f224d80/data/data4/current/BP-1834779685-172.17.0.3-1733202174340 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T05:02:57,379 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7fb436c5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-03T05:02:57,379 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@59a2b0cf{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T05:02:57,379 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T05:02:57,379 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1917abb0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T05:02:57,379 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ddf6979{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/hadoop.log.dir/,STOPPED}
2024-12-03T05:02:57,384 WARN [BP-1834779685-172.17.0.3-1733202174340 heartbeating to localhost/127.0.0.1:46313 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-03T05:02:57,384 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-03T05:02:57,384 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-03T05:02:57,384 WARN [BP-1834779685-172.17.0.3-1733202174340 heartbeating to localhost/127.0.0.1:46313 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1834779685-172.17.0.3-1733202174340 (Datanode Uuid f3730e1e-719b-4219-ab73-759633a6cb5f) service to localhost/127.0.0.1:46313
2024-12-03T05:02:57,384 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/cluster_2173fbb5-6706-d303-ecd6-3f263f224d80/data/data1/current/BP-1834779685-172.17.0.3-1733202174340 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T05:02:57,385 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/cluster_2173fbb5-6706-d303-ecd6-3f263f224d80/data/data2/current/BP-1834779685-172.17.0.3-1733202174340 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-03T05:02:57,385 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-03T05:02:57,393 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3cb23f43{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-03T05:02:57,394 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1d180ecd{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-03T05:02:57,394 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-03T05:02:57,394 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@455a32b8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-03T05:02:57,394 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@610df049{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/3d7be91b-b897-a0ee-91ac-e62031b8ca03/hadoop.log.dir/,STOPPED}
2024-12-03T05:02:57,401 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-03T05:02:57,423 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-03T05:02:57,436 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=266 (was 227)
Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:46313 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1647223992) connection to localhost/127.0.0.1:46313 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46313 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1647223992) connection to localhost/127.0.0.1:46313 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46313 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:46313 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46313 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1647223992) connection to localhost/127.0.0.1:46313 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=535 (was 506) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=535 (was 547), ProcessCount=11 (was 11), AvailableMemoryMB=3709 (was 3965)